diff --git a/.clang-format b/.clang-format index e58d518b3b8cacdd1e13dd965805fa364a996eb2..56ca83e724ad0b804a10b9be0dd42aa7a05eeaf7 100644 --- a/.clang-format +++ b/.clang-format @@ -88,4 +88,3 @@ Standard: Auto TabWidth: 8 UseTab: Never ... - diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..912b302ad23d47c46708d672175a908f2dbc74e8 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.py linguist-detectable=false diff --git a/CONTRIBUTING-CN.md b/CONTRIBUTING-CN.md new file mode 100644 index 0000000000000000000000000000000000000000..19f3000d45b53a4fcdab9d0e0ebbf2da7e2d94f8 --- /dev/null +++ b/CONTRIBUTING-CN.md @@ -0,0 +1,58 @@ +# 贡献指南 + +我们感谢所有开发者提交贡献。随时关注我们,Fork 存储库,报告错误,以及在 GitHub 上提交您的代码。但是,我们希望开发者遵循我们的指南,才能更好的做出贡献。 + +## 报告错误 + +- 任何用户都可以通过 **[GitHub issue tracker](https://github.com/taosdata/TDengine/issues)** 向我们报告错误。请您对所遇到的问题进行**详细描述**,最好提供重现错误的详细步骤。 +- 欢迎提供包含由 Bug 生成的日志文件的附录。 + +## 需要强调的代码提交规则 + +- 在提交代码之前,需要**同意贡献者许可协议(CLA)**。点击 [TaosData CLA](https://cla-assistant.io/taosdata/TDengine) 阅读并签署协议。如果您不接受该协议,请停止提交。 +- 请在 [GitHub issue tracker](https://github.com/taosdata/TDengine/issues) 中解决问题或添加注册功能。 +- 如果在 [GitHub issue tracker](https://github.com/taosdata/TDengine/issues) 中没有找到相应的问题或功能,请**创建一个新的 issue**。 +- 将代码提交到我们的存储库时,请创建**包含问题编号的 PR**。 + +## 贡献指南 + +1. 请用友好的语气书写。 + +2. **主动语态**总体上优于被动语态。主动语态中的句子会突出执行动作的人,而不是被动语态突出动作的接受者。 + +3. 文档写作建议 + +- 正确拼写产品名称 “TDengine”。 “TD” 用大写字母,“TD” 和 “engine” 之间没有空格 **(正确拼写:TDengine)**。 +- 在句号或其他标点符号后只留一个空格。 + +4. 尽量**使用简单句**,而不是复杂句。 + +## 给贡献者的礼品 + +只要您是为 TDengine 做贡献的开发者,不管是代码贡献、修复 bug 或功能请求,还是文档更改,您都将会获得一份**特别的贡献者纪念品礼物**! + +

+ + + + +TDengine 社区致力于让更多的开发者理解和使用它。 +请填写**贡献者提交表**以选择您想收到的礼物。 + +- [贡献者提交表](https://page.ma.scrmtech.com/form/index?pf_uid=27715_2095&id=12100) + +## 联系我们 + +如果您有什么问题需要解决,或者有什么问题需要解答,可以添加微信:TDengineECO diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3b1a66839d3d4779f00090a84e6895bd0d660d0d..5be84bec3483ac2f79f43941465df3b50047e661 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,15 +1,64 @@ # Contributing -We appreciate contributions from all developers. Feel free to follow us, fork the repository, report bugs and even submit your code on GitHub. However, we would like developers to follow our guides to contribute for better corporation. +We appreciate contributions from all developers. Feel free to follow us, fork the repository, report bugs, and even submit your code on GitHub. However, we would like developers to follow the guidelines in this document to ensure effective cooperation. -## Report bugs +## Reporting a bug -Any users can report bugs to us through the [github issue tracker](https://github.com/taosdata/TDengine/issues). We appreciate a detailed description of the problem you met. It is better to provide the detailed steps on reproducing the bug. Otherwise, an appendix with log files generated by the bug is welcome. +- Any users can report bugs to us through the **[GitHub issue tracker](https://github.com/taosdata/TDengine/issues)**. We would appreciate if you could provide **a detailed description** of the problem you encountered, including steps to reproduce it. -## Read the contributor license agreement +- Attaching log files caused by the bug is really appreciated. -It is required to agree the Contributor Licence Agreement(CLA) before a user submitting his/her code patch. Follow the [TaosData CLA](https://www.taosdata.com/en/contributor/) link to read through the agreement. +## Guidelines for committing code -## Submit your code +- You must agree to the **Contributor License Agreement(CLA) before submitting your code patch**. Follow the **[TAOSData CLA](https://cla-assistant.io/taosdata/TDengine)** link to read through and sign the agreement. If you do not accept the agreement, your contributions cannot be accepted. -Before submitting your code, make sure to [read the contributor license agreement](#read-the-contributor-license-agreement) beforehand. If you don't accept the aggreement, please stop submitting. Your submission means you have accepted the agreement. Your submission should solve an issue or add a feature registered in the [github issue tracker](https://github.com/taosdata/TDengine/issues). If no corresponding issue or feature is found in the issue tracker, please create one. When submitting your code to our repository, please create a pull request with the issue number included. +- Please solve an issue or add a feature registered in the **[GitHub issue tracker](https://github.com/taosdata/TDengine/issues)**. +- If no corresponding issue or feature is found in the issue tracker, please **create one**. +- When submitting your code to our repository, please create a pull request with the **issue number** included. + +## Guidelines for communicating + +1. Please be **nice and polite** in the description. +2. **Active voice is better than passive voice in general**. Sentences in the active voice will highlight who is performing the action rather than the recipient of the action highlighted by the passive voice. +3. Documentation writing advice + +- Spell the product name "TDengine" correctly. "TD" is written in capital letters, and there is no space between "TD" and "engine" (**Correct spelling: TDengine**). +- Please **capitalize the first letter** of every sentence. +- Leave **only one space** after periods or other punctuation marks. +- Use **American spelling**. +- When possible, **use second person** rather than first person (e.g.“You are recommended to use a reverse proxy such as Nginx.” rather than “We recommend to use a reverse proxy such as Nginx.”). + +5. Use **simple sentences**, rather than complex sentences. + +## Gifts for the contributors + +Developers, as long as you contribute to TDengine, whether it's code contributions to fix bugs or feature requests, or documentation changes, **you are eligible for a very special Contributor Souvenir Gift!** + +**You can choose one of the following gifts:** + +

+ + + + +The TDengine community is committed to making TDengine accepted and used by more developers. + +Just fill out the **Contributor Submission Form** to choose your desired gift. + +- [Contributor Submission Form](https://page.ma.scrmtech.com/form/index?pf_uid=27715_2095&id=12100) + +## Contact us + +If you have any problems or questions that need help from us, please feel free to add our WeChat account: TDengineECO. diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 754617f99f71fa90380e42fa2b9b5f3248620d7c..d7df07f06afd8e1e483455e3ce925a03f28740fd 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -1,6 +1,7 @@ import hudson.model.Result import hudson.model.*; import jenkins.model.CauseOfInterruption +docs_only=0 node { } @@ -29,6 +30,49 @@ def abort_previous(){ if (buildNumber > 1) milestone(buildNumber - 1) milestone(buildNumber) } +def check_docs() { + if (env.CHANGE_URL =~ /\/TDengine\//) { + sh ''' + hostname + date + env + ''' + sh ''' + cd ${WKC} + git reset --hard + git clean -fxd + rm -rf examples/rust/ + git remote prune origin + git fetch + ''' + script { + sh ''' + cd ${WKC} + git checkout ''' + env.CHANGE_TARGET + ''' + ''' + } + sh ''' + cd ${WKC} + git remote prune origin + git pull >/dev/null + git fetch origin +refs/pull/${CHANGE_ID}/merge + git checkout -qf FETCH_HEAD + ''' + def file_changed = sh ( + script: ''' + cd ${WKC} + git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/" || : + ''', + returnStdout: true + ).trim() + if (file_changed == '') { + echo "docs PR" + docs_only=1 + } else { + echo file_changed + } + } +} def pre_test(){ sh ''' hostname @@ -43,6 +87,7 @@ def pre_test(){ cd ${WKC} git reset --hard git clean -fxd + rm -rf examples/rust/ git remote prune origin git fetch ''' @@ -306,10 +351,27 @@ pipeline { WKPY = '/var/lib/jenkins/workspace/taos-connector-python' } stages { + stage('check') { + when { + allOf { + not { expression { env.CHANGE_BRANCH =~ /docs\// }} + not { expression { env.CHANGE_URL =~ /\/TDinternal\// }} + } + } + parallel { + stage('check docs') { + agent{label " worker03 || slave215 || slave217 || slave219 || Mac_catalina "} + steps { + check_docs() + } + } + } + } stage('run test') { when { allOf { not { expression { env.CHANGE_BRANCH =~ /docs\// }} + expression { docs_only == 0 } } } parallel { diff --git a/README-CN.md b/README-CN.md index 7df2733a2e76f602363f219d61cc1f877f48f12e..0b7e42d4fa19045e94f004ab61159a8d79e4bb82 100644 --- a/README-CN.md +++ b/README-CN.md @@ -21,17 +21,17 @@ TDengine 是一款开源、高性能、云原生的时序数据库 (Time-Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供缓存、数据订阅、流式计算等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。与其他时序数据库相比,TDengine 的主要优势如下: -- 高性能:通过创新的存储引擎设计,无论是数据写入还是查询,TDengine 的性能比通用数据库快 10 倍以上,也远超其他时序数据库,存储空间不及通用数据库的1/10。 +- **高性能**:通过创新的存储引擎设计,无论是数据写入还是查询,TDengine 的性能比通用数据库快 10 倍以上,也远超其他时序数据库,存储空间不及通用数据库的1/10。 -- 云原生:通过原生分布式的设计,充分利用云平台的优势,TDengine 提供了水平扩展能力,具备弹性、韧性和可观测性,支持k8s部署,可运行在公有云、私有云和混合云上。 +- **云原生**:通过原生分布式的设计,充分利用云平台的优势,TDengine 提供了水平扩展能力,具备弹性、韧性和可观测性,支持k8s部署,可运行在公有云、私有云和混合云上。 -- 极简时序数据平台:TDengine 内建消息队列、缓存、流式计算等功能,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低系统的复杂度,降低应用开发和运营成本。 +- **极简时序数据平台**:TDengine 内建消息队列、缓存、流式计算等功能,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低系统的复杂度,降低应用开发和运营成本。 -- 分析能力:支持 SQL,同时为时序数据特有的分析提供SQL扩展。通过超级表、存储计算分离、分区分片、预计算、自定义函数等技术,TDengine 具备强大的分析能力。 +- **分析能力**:支持 SQL,同时为时序数据特有的分析提供SQL扩展。通过超级表、存储计算分离、分区分片、预计算、自定义函数等技术,TDengine 具备强大的分析能力。 -- 简单易用:无任何依赖,安装、集群几秒搞定;提供REST以及各种语言连接器,与众多第三方工具无缝集成;提供命令行程序,便于管理和即席查询;提供各种运维工具。 +- **简单易用**:无任何依赖,安装、集群几秒搞定;提供REST以及各种语言连接器,与众多第三方工具无缝集成;提供命令行程序,便于管理和即席查询;提供各种运维工具。 -- 核心开源:TDengine 的核心代码包括集群功能全部开源,截止到2022年8月1日,全球超过 135.9k 个运行实例,GitHub Star 18.7k,Fork 4.4k,社区活跃。 +- **核心开源**:TDengine 的核心代码包括集群功能全部开源,截止到2022年8月1日,全球超过 135.9k 个运行实例,GitHub Star 18.7k,Fork 4.4k,社区活跃。 # 文档 @@ -210,14 +210,14 @@ cmake .. -G "NMake Makefiles" nmake ``` -### macOS 系统 + # 安装 @@ -303,14 +303,14 @@ Query OK, 2 row(s) in set (0.001700s) TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java、Python、Go、Node.js、C# 、RESTful 等,便于用户快速开发应用: -- [Java](https://docs.taosdata.com/reference/connector/java/) -- [C/C++](https://www.taosdata.com/cn/documentation/connector#c-cpp) -- [Python](https://docs.taosdata.com/reference/connector/python/) -- [Go](https://docs.taosdata.com/reference/connector/go/) -- [Node.js](https://docs.taosdata.com/reference/connector/node/) -- [Rust](https://docs.taosdata.com/reference/connector/rust/) -- [C#](https://docs.taosdata.com/reference/connector/csharp/) -- [RESTful API](https://docs.taosdata.com/reference/rest-api/) +- [Java](https://docs.taosdata.com/connector/java/) +- [C/C++](https://docs.taosdata.com/connector/cpp/) +- [Python](https://docs.taosdata.com/connector/python/) +- [Go](https://docs.taosdata.com/connector/go/) +- [Node.js](https://docs.taosdata.com/connector/node/) +- [Rust](https://docs.taosdata.com/connector/rust/) +- [C#](https://docs.taosdata.com/connector/csharp/) +- [RESTful API](https://docs.taosdata.com/connector/rest-api/) # 成为社区贡献者 diff --git a/README.md b/README.md index c915fe3aef8d46389af223708146a6a47dc8af0a..611d97aac9436bdcc732efcf98822f2dd11d74ab 100644 --- a/README.md +++ b/README.md @@ -15,43 +15,33 @@ [![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201) - English | [简体中文](README-CN.md) | We are hiring, check [here](https://tdengine.com/careers) # What is TDengine? +TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/what-is-a-time-series-database/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-seires databases with the following advantages: -TDengine is an open source, high performance , cloud native time-series database (Time-Series Database, TSDB). - -TDengine can be optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT, IT operation and maintenance, finance and other fields. In addition to the core time series database functions, TDengine also provides functions such as caching, data subscription, and streaming computing. It is a minimalist time series data processing platform that minimizes the complexity of system design and reduces R&D and operating costs. Compared with other time series databases, the main advantages of TDengine are as follows: - +- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression. -- High-Performance: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression. +- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly. -- Simplified Solution: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly. +- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds. -- Cloud Native: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds. +- **[Ease of Use](https://docs.tdengine.com/get-started/docker/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access. -- Ease of Use: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access. +- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way. -- Easy Data Analytics: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way. - -- Open Source: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub, an active developer community, and over 137k running instances worldwide. +- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub. There is an active developer community, and over 139k running instances worldwide. # Documentation -For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.taosdata.com) ([TDengine 文档](https://docs.taosdata.com)) +For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) ([TDengine 文档](https://docs.taosdata.com)) # Building +At the moment, TDengine server supports running on Linux and Windows systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. -At the moment, TDengine server supports running on Linux, Windows systems.Any OS application can also choose the RESTful interface of taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU , and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. - - - -You can choose to install through source code according to your needs, [container](https://docs.taosdata.com/get-started/docker/), [installation package](https://docs.taosdata.com/get-started/package/) or [Kubenetes](https://docs.taosdata.com/deployment/k8s/) to install. This quick guide only applies to installing from source. - - +You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to installing from source. TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to make them be compiled with TDengine. @@ -67,7 +57,6 @@ sudo apt-get install -y gcc cmake build-essential git libssl-dev #### Install build dependencies for taosTools - To build the [taosTools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed. ```bash @@ -91,14 +80,13 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel #### Install build dependencies for taosTools on CentOS - #### CentOS 7.9 ``` sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libstdc++-static openssl-devel ``` -#### CentOS 8/Rocky Linux +#### CentOS 8/Rocky Linux ``` sudo yum install -y epel-release @@ -109,14 +97,14 @@ sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgco Note: Since snappy lacks pkg-config support (refer to [link](https://github.com/google/snappy/pull/86)), it leads a cmake prompt libsnappy not found. But snappy still works well. -If the powertools installation fails, you can try to use: +If the PowerTools installation fails, you can try to use: + ``` -sudo yum config-manager --set-enabled Powertools +sudo yum config-manager --set-enabled powertools ``` ### Setup golang environment - TDengine includes a few components like taosAdapter developed by Go language. Please refer to golang.org official documentation for golang environment setup. Please use version 1.14+. For the user in China, we recommend using a proxy to accelerate package downloading. @@ -134,7 +122,7 @@ cmake .. -DBUILD_HTTP=false ### Setup rust environment -TDengine includes a few compoments developed by Rust language. Please refer to rust-lang.org official documentation for rust environment setup. +TDengine includes a few components developed by Rust language. Please refer to rust-lang.org official documentation for rust environment setup. ## Get the source codes @@ -145,7 +133,6 @@ git clone https://github.com/taosdata/TDengine.git cd TDengine ``` - You can modify the file ~/.gitconfig to use ssh protocol instead of https for better download speed. You will need to upload ssh public key to GitHub first. Please refer to GitHub official documentation for detail. ``` @@ -155,14 +142,12 @@ You can modify the file ~/.gitconfig to use ssh protocol instead of https for be ## Special Note - [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go),[Python Connector](https://github.com/taosdata/taos-connector-python),[Node.js Connector](https://github.com/taosdata/taos-connector-node),[C# Connector](https://github.com/taosdata/taos-connector-dotnet) ,[Rust Connector](https://github.com/taosdata/taos-connector-rust) and [Grafana plugin](https://github.com/taosdata/grafanaplugin) has been moved to standalone repository. ## Build TDengine ### On Linux platform - You can run the bash script `build.sh` to build both TDengine and taosTools including taosBenchmark and taosdump as below: ```bash @@ -178,7 +163,6 @@ cmake .. -DBUILD_TOOLS=true make ``` - You can use Jemalloc as memory allocator instead of glibc: ``` @@ -227,14 +211,14 @@ cmake .. -G "NMake Makefiles" nmake ``` -### On macOS platform + # Installing @@ -246,7 +230,7 @@ After building successfully, TDengine can be installed by sudo make install ``` -Users can find more information about directories installed on the system in the [directory and files](https://docs.taosdata.com/reference/directory/) section. +Users can find more information about directories installed on the system in the [directory and files](https://docs.taosdata.com/reference/directory/) section. Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.taosdata.com/get-started/package/) for it. @@ -272,6 +256,7 @@ After building successfully, TDengine can be installed by: nmake install ``` + ## Quick Run @@ -318,16 +304,16 @@ Query OK, 2 row(s) in set (0.001700s) ## Official Connectors -TDengine provides abundant developing tools for users to develop on TDengine. include C/C++、Java、Python、Go、Node.js、C# 、RESTful ,Follow the links below to find your desired connectors and relevant documentation. +TDengine provides abundant developing tools for users to develop on TDengine. Follow the links below to find your desired connectors and relevant documentation. -- [Java](https://docs.taosdata.com/reference/connector/java/) -- [C/C++](https://docs.taosdata.com/reference/connector/cpp/) -- [Python](https://docs.taosdata.com/reference/connector/python/) -- [Go](https://docs.taosdata.com/reference/connector/go/) -- [Node.js](https://docs.taosdata.com/reference/connector/node/) -- [Rust](https://docs.taosdata.com/reference/connector/rust/) -- [C#](https://docs.taosdata.com/reference/connector/csharp/) -- [RESTful API](https://docs.taosdata.com/reference/rest-api/) +- [Java](https://docs.tdengine.com/reference/connector/java/) +- [C/C++](https://docs.tdengine.com/reference/connector/cpp/) +- [Python](https://docs.tdengine.com/reference/connector/python/) +- [Go](https://docs.tdengine.com/reference/connector/go/) +- [Node.js](https://docs.tdengine.com/reference/connector/node/) +- [Rust](https://docs.tdengine.com/reference/connector/rust/) +- [C#](https://docs.tdengine.com/reference/connector/csharp/) +- [RESTful API](https://docs.tdengine.com/reference/rest-api/) # Contribute to TDengine diff --git a/cmake/cmake.define b/cmake/cmake.define index 5639d212d7308cbd452720b890ca79b442d9c47c..5d64815a9aa90741a0d6aca7e51518d2263932a2 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -2,8 +2,6 @@ cmake_minimum_required(VERSION 3.0) set(CMAKE_VERBOSE_MAKEFILE OFF) -SET(BUILD_SHARED_LIBS "OFF") - #set output directory SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib) SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/bin) @@ -81,7 +79,7 @@ ENDIF () IF (TD_WINDOWS) MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}") - SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi") + SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd") SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO") # IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900)) # SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18") @@ -92,11 +90,20 @@ IF (TD_WINDOWS) IF (CMAKE_DEPFILE_FLAGS_CXX) SET(CMAKE_DEPFILE_FLAGS_CXX "") ENDIF () + IF (CMAKE_C_FLAGS_DEBUG) + SET(CMAKE_C_FLAGS_DEBUG "" CACHE STRING "" FORCE) + ENDIF () + IF (CMAKE_CXX_FLAGS_DEBUG) + SET(CMAKE_CXX_FLAGS_DEBUG "" CACHE STRING "" FORCE) + ENDIF () SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}") ELSE () + IF (${TD_DARWIN}) + set(CMAKE_MACOSX_RPATH 0) + ENDIF () IF (${COVER} MATCHES "true") MESSAGE(STATUS "Test coverage mode, add extra flags") SET(GCC_COVERAGE_COMPILE_FLAGS "-fprofile-arcs -ftest-coverage") diff --git a/cmake/cmake.install b/cmake/cmake.install index 4e3d0b166aba447cf48fb664f429a885caece953..fd1e080ddab1478f73689e7cced405ae8404fbc2 100644 --- a/cmake/cmake.install +++ b/cmake/cmake.install @@ -1,38 +1,24 @@ -IF (EXISTS /var/lib/taos/dnode/dnodeCfg.json) - INSTALL(CODE "MESSAGE(\"The default data directory /var/lib/taos contains old data of tdengine 2.x, please clear it before installing!\")") -ELSEIF (EXISTS C:/TDengine/data/dnode/dnodeCfg.json) - INSTALL(CODE "MESSAGE(\"The default data directory C:/TDengine/data contains old data of tdengine 2.x, please clear it before installing!\")") -ELSEIF (TD_LINUX) +SET(PREPARE_ENV_CMD "prepare_env_cmd") +SET(PREPARE_ENV_TARGET "prepare_env_target") +ADD_CUSTOM_COMMAND(OUTPUT ${PREPARE_ENV_CMD} + POST_BUILD + COMMAND echo "make test directory" + DEPENDS taosd + COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/cfg/ + COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/log/ + COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/data/ + COMMAND ${CMAKE_COMMAND} -E echo dataDir ${TD_TESTS_OUTPUT_DIR}/data > ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg + COMMAND ${CMAKE_COMMAND} -E echo logDir ${TD_TESTS_OUTPUT_DIR}/log >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg + COMMAND ${CMAKE_COMMAND} -E echo charset UTF-8 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg + COMMAND ${CMAKE_COMMAND} -E echo monitor 0 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg + COMMENT "prepare taosd environment") +ADD_CUSTOM_TARGET(${PREPARE_ENV_TARGET} ALL WORKING_DIRECTORY ${TD_EXECUTABLE_OUTPUT_PATH} DEPENDS ${PREPARE_ENV_CMD}) + +IF (TD_LINUX) SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.sh") INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")") INSTALL(CODE "execute_process(COMMAND bash ${TD_MAKE_INSTALL_SH} ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Linux ${TD_VER_NUMBER})") ELSEIF (TD_WINDOWS) - SET(CMAKE_INSTALL_PREFIX C:/TDengine) - - # INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/go DESTINATION connector) - # INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/nodejs DESTINATION connector) - # INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/python DESTINATION connector) - # INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/C\# DESTINATION connector) - # INSTALL(DIRECTORY ${TD_SOURCE_DIR}/examples DESTINATION .) - INSTALL(CODE "IF (NOT EXISTS ${CMAKE_INSTALL_PREFIX}/cfg/taos.cfg) - execute_process(COMMAND ${CMAKE_COMMAND} -E copy ${TD_SOURCE_DIR}/packaging/cfg/taos.cfg ${CMAKE_INSTALL_PREFIX}/cfg/taos.cfg) - ENDIF ()") - INSTALL(FILES ${TD_SOURCE_DIR}/include/client/taos.h DESTINATION include) - INSTALL(FILES ${TD_SOURCE_DIR}/include/util/taoserror.h DESTINATION include) - INSTALL(FILES ${TD_SOURCE_DIR}/include/libs/function/taosudf.h DESTINATION include) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos_static.lib DESTINATION driver) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver) - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosd.exe DESTINATION .) - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/udfd.exe DESTINATION .) - IF (BUILD_TOOLS) - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosBenchmark.exe DESTINATION .) - ENDIF () - - IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.38-dist.jar DESTINATION connector/jdbc) - ENDIF () SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.bat") INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")") INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} :needAdmin ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Windows ${TD_VER_NUMBER})") diff --git a/cmake/cmake.options b/cmake/cmake.options index bec64f7bf00cdb0c6fddc713af0801eae08d45ea..3baccde4d711e7c7a535829c95a0ee8cdff3fae6 100644 --- a/cmake/cmake.options +++ b/cmake/cmake.options @@ -90,6 +90,12 @@ ELSE () ENDIF () ENDIF () +option( + BUILD_SHARED_LIBS + "" + OFF + ) + option( RUST_BINDINGS "If build with rust-bindings" diff --git a/cmake/cmake.platform b/cmake/cmake.platform index 887fbd86d55d782cdf3c1d7c95dfee2dc2ec446d..3aa1ffc07e73acdf480a21b478d55e05153694f8 100644 --- a/cmake/cmake.platform +++ b/cmake/cmake.platform @@ -46,7 +46,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin MESSAGE("Current system processor is ${CMAKE_SYSTEM_PROCESSOR}.") IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86_64") - MESSAGE("Current system arch is arm64") + MESSAGE("Current system arch is 64") SET(TD_DARWIN_64 TRUE) ADD_DEFINITIONS("-D_TD_DARWIN_64") ENDIF () diff --git a/cmake/cmake.version b/cmake/cmake.version index c8afc1a291d705e2da29042de3ac3299b7bfd265..db29644b387306ce8f3ee473921dab4c7d05b10a 100644 --- a/cmake/cmake.version +++ b/cmake/cmake.version @@ -2,7 +2,7 @@ IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "3.0.0.0") + SET(TD_VER_NUMBER "3.0.0.1") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/cmake/libuv_CMakeLists.txt.in b/cmake/libuv_CMakeLists.txt.in index 14228b775f46fbde420b9e99117f3ebc0de03e88..9c48ddefef166b0bf0bdc5f3f0da7f501a351643 100644 --- a/cmake/libuv_CMakeLists.txt.in +++ b/cmake/libuv_CMakeLists.txt.in @@ -2,7 +2,7 @@ # libuv ExternalProject_Add(libuv GIT_REPOSITORY https://github.com/libuv/libuv.git - GIT_TAG v1.42.0 + GIT_TAG v1.44.2 SOURCE_DIR "${TD_CONTRIB_DIR}/libuv" BINARY_DIR "${TD_CONTRIB_DIR}/libuv" CONFIGURE_COMMAND "" diff --git a/cmake/rust-bindings_CMakeLists.txt.in b/cmake/rust-bindings_CMakeLists.txt.in deleted file mode 100644 index d16e86139b20fa94505953bc56108f1f61dbbffb..0000000000000000000000000000000000000000 --- a/cmake/rust-bindings_CMakeLists.txt.in +++ /dev/null @@ -1,12 +0,0 @@ - -# rust-bindings -ExternalProject_Add(rust-bindings - GIT_REPOSITORY https://github.com/songtianyi/tdengine-rust-bindings.git - GIT_TAG 7ed7a97 - SOURCE_DIR "${TD_SOURCE_DIR}/examples/rust" - BINARY_DIR "${TD_SOURCE_DIR}/examples/rust" - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" - TEST_COMMAND "" - ) diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in index ed8216be9127b8c5164c977420d3ccd8f6521d31..f182beed33c76200649f93d96b68c153ec452b9a 100644 --- a/cmake/taosadapter_CMakeLists.txt.in +++ b/cmake/taosadapter_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosadapter ExternalProject_Add(taosadapter GIT_REPOSITORY https://github.com/taosdata/taosadapter.git - GIT_TAG 3d21433 + GIT_TAG abed566 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 3a6eb3c25a553d588c9eb7eb703c6e4dd3da53db..68caf9a9acde518be86c143168245e1d01a4a389 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 53a0103 + GIT_TAG 9cb965f SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index b4e8825431475c09fbf925671e6d7f691c700b15..2dc7622f4601ad8b1fb5c578c6aa8b5f0df02daf 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -105,11 +105,6 @@ if(${BUILD_WITH_SQLITE}) cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif(${BUILD_WITH_SQLITE}) -# rust-bindings -if(${RUST_BINDINGS}) - cat("${TD_SUPPORT_DIR}/rust-bindings_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) -endif(${RUST_BINDINGS}) - # lucene if(${BUILD_WITH_LUCENE}) cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -140,24 +135,6 @@ execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") execute_process(COMMAND "${CMAKE_COMMAND}" --build . WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") - -# clear submodule -execute_process(COMMAND git submodule deinit -f tools/taos-tools - WORKING_DIRECTORY "${TD_SOURCE_DIR}") -execute_process(COMMAND git rm --cached tools/taos-tools - WORKING_DIRECTORY "${TD_SOURCE_DIR}") -execute_process(COMMAND git submodule deinit -f tools/taosadapter - WORKING_DIRECTORY "${TD_SOURCE_DIR}") -execute_process(COMMAND git rm --cached tools/taosadapter - WORKING_DIRECTORY "${TD_SOURCE_DIR}") -execute_process(COMMAND git submodule deinit -f tools/taosws-rs - WORKING_DIRECTORY "${TD_SOURCE_DIR}") -execute_process(COMMAND git rm --cached tools/taosws-rs - WORKING_DIRECTORY "${TD_SOURCE_DIR}") -execute_process(COMMAND git submodule deinit -f examples/rust - WORKING_DIRECTORY "${TD_SOURCE_DIR}") -execute_process(COMMAND git rm --cached examples/rust - WORKING_DIRECTORY "${TD_SOURCE_DIR}") # ================================================================================================ # Build @@ -273,7 +250,7 @@ endif(${BUILD_WITH_NURAFT}) # pthread if(${BUILD_PTHREAD}) - set(CMAKE_BUILD_TYPE release) + set(CMAKE_BUILD_TYPE debug) add_definitions(-DPTW32_STATIC_LIB) add_subdirectory(pthread EXCLUDE_FROM_ALL) set_target_properties(libpthreadVC3 PROPERTIES OUTPUT_NAME pthread) @@ -354,9 +331,11 @@ endif(${BUILD_WITH_TRAFT}) # LIBUV if(${BUILD_WITH_UV}) - if (NOT ${CMAKE_SYSTEM_NAME} MATCHES "Windows") - MESSAGE("Windows need set no-sign-compare") - add_compile_options(-Wno-sign-compare) + if (TD_WINDOWS) + # There is no GetHostNameW function on win7. + file(READ "libuv/src/win/util.c" LIBUV_WIN_UTIL_CONTENT) + string(REPLACE "if (GetHostNameW(buf, UV_MAXHOSTNAMESIZE" "DWORD nSize = UV_MAXHOSTNAMESIZE;\n if (GetComputerNameW(buf, &nSize" LIBUV_WIN_UTIL_CONTENT "${LIBUV_WIN_UTIL_CONTENT}") + file(WRITE "libuv/src/win/util.c" "${LIBUV_WIN_UTIL_CONTENT}") endif () add_subdirectory(libuv EXCLUDE_FROM_ALL) endif(${BUILD_WITH_UV}) diff --git a/docs/assets/contributing-cup.jpg b/docs/assets/contributing-cup.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2bf935132a9c2395a06efd92ff51ecb7244caac5 Binary files /dev/null and b/docs/assets/contributing-cup.jpg differ diff --git a/docs/assets/contributing-notebook.jpg b/docs/assets/contributing-notebook.jpg new file mode 100644 index 0000000000000000000000000000000000000000..de32051cad6f659f6bf104290189076086bcb3a5 Binary files /dev/null and b/docs/assets/contributing-notebook.jpg differ diff --git a/docs/assets/contributing-shirt.jpg b/docs/assets/contributing-shirt.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bffe3aff1ac9bacbd008c997edbaf793af1e2de9 Binary files /dev/null and b/docs/assets/contributing-shirt.jpg differ diff --git a/docs/en/01-index.md b/docs/en/01-index.md index f5b7f3e0f61507efbb09506b48548c12317e700b..5265be42f81c4f43fa73e5b7d603d8989c2a5671 100644 --- a/docs/en/01-index.md +++ b/docs/en/01-index.md @@ -4,24 +4,24 @@ sidebar_label: Documentation Home slug: / --- -TDengine is a [high-performance](https://tdengine.com/fast), [scalable](https://tdengine.com/scalable) time series database with [SQL support](https://tdengine.com/sql-support). This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators. +TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It’s written mainly for architects, developers, and system administrators. To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section. -TDengine greatly improves the efficiency of data ingestion, querying and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [“Concepts”](./concept) thoroughly. +TDengine greatly improves the efficiency of data ingestion, querying, and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [Concepts](./concept) thoroughly. -If you are a developer, please read the [“Developer Guide”](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work. +If you are a developer, please read the [Developer Guide](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, and make a few changes to accommodate your application, and it will work. -We live in the era of big data, and scale-up is unable to meet the growing needs of business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to ["cluster"](./cluster). +We live in the era of big data, and scale-up is unable to meet the growing needs of the business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to [Cluster Deployment](../deployment). -TDengine uses ubiquitious SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll up, interpolation and time weighted average, among many others. The ["SQL Reference"](./taos-sql) chapter describes the SQL syntax in detail, and lists the various supported commands and functions. +TDengine uses ubiquitous SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll-up, interpolation, and time-weighted average, among many others. The [SQL Reference](./taos-sql) chapter describes the SQL syntax in detail and lists the various supported commands and functions. -If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the ["Administration"](./operation) section. +If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the [Administration](./operation) section. -If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the ["Reference"](./reference) chapter. +If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the [Reference](./reference) chapter. -If you are very interested in the internal design of TDengine, please read the chapter ["Inside TDengine”](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully. +If you are very interested in the internal design of TDengine, please read the chapter [Inside TDengine](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully. -TDengine is an open source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation, or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly. +TDengine is an open-source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly. -Together, we make a difference. +Together, we make a difference! diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md index f6766f910f4d7560b782bf02ffa97922523e6167..d385845d7c57203d6e1cc8ddb8d53307f2655914 100644 --- a/docs/en/02-intro/index.md +++ b/docs/en/02-intro/index.md @@ -3,7 +3,7 @@ title: Introduction toc_max_heading_level: 2 --- -TDengine is a high-performance, scalable time-series database with SQL support. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](/develop/cache), [stream processing](/develop/continuous-query), [data subscription](/develop/subscribe) and other functionalities to reduce the complexity and cost of development and operation. +TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation. This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine. @@ -11,52 +11,69 @@ This section introduces the major features, competitive advantages, typical use- The major features are listed below: -1. While TDengine supports [using SQL to insert](/develop/insert-data/sql-writing), it also supports [Schemaless writing](/reference/schemaless/) just like NoSQL databases. TDengine also supports standard protocols like [InfluxDB LINE](/develop/insert-data/influxdb-line),[OpenTSDB Telnet](/develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](/develop/insert-data/opentsdb-json) among others. -2. TDengine supports seamless integration with third-party data collection agents like [Telegraf](/third-party/telegraf),[Prometheus](/third-party/prometheus),[StatsD](/third-party/statsd),[collectd](/third-party/collectd),[icinga2](/third-party/icinga2), [TCollector](/third-party/tcollector), [EMQX](/third-party/emq-broker), [HiveMQ](/third-party/hive-mq-broker). These agents can write data into TDengine with simple configuration and without a single line of code. -3. Support for [all kinds of queries](/develop/query-data), including aggregation, nested query, downsampling, interpolation and others. -4. Support for [user defined functions](/develop/udf). -5. Support for [caching](/develop/cache). TDengine always saves the last data point in cache, so Redis is not needed in some scenarios. -6. Support for [continuous query](/develop/continuous-query). -7. Support for [data subscription](/develop/subscribe) with the capability to specify filter conditions. -8. Support for [cluster](/cluster/), with the capability of increasing processing power by adding more nodes. High availability is supported by replication. -9. Provides an interactive [command-line interface](/reference/taos-shell) for management, maintenance and ad-hoc queries. -10. Provides many ways to [import](/operation/import) and [export](/operation/export) data. -11. Provides [monitoring](/operation/monitor) on running instances of TDengine. -12. Provides [connectors](/reference/connector/) for [C/C++](/reference/connector/cpp), [Java](/reference/connector/java), [Python](/reference/connector/python), [Go](/reference/connector/go), [Rust](/reference/connector/rust), [Node.js](/reference/connector/node) and other programming languages. -13. Provides a [REST API](/reference/rest-api/). -14. Supports seamless integration with [Grafana](/third-party/grafana) for visualization. -15. Supports seamless integration with Google Data Studio. - -For more details on features, please read through the entire documentation. +1. Insert data + - Supports [using SQL to insert](../develop/insert-data/sql-writing). + - Supports [schemaless writing](../reference/schemaless/) just like NoSQL databases. It also supports standard protocols like [InfluxDB Line](../develop/insert-data/influxdb-line), [OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](../develop/insert-data/opentsdb-json) among others. + - Supports seamless integration with third-party tools like [Telegraf](../third-party/telegraf/), [Prometheus](../third-party/prometheus/), [collectd](../third-party/collectd/), [StatsD](../third-party/statsd/), [TCollector](../third-party/tcollector/), [EMQX](../third-party/emq-broker), [HiveMQ](../third-party/hive-mq-broker), and [Icinga2](../third-party/icinga2/), they can write data into TDengine with simple configuration and without a single line of code. +2. Query data + - Supports standard [SQL](../taos-sql/), including nested query. + - Supports [time series specific functions](../taos-sql/function/#time-series-extensions) and [time series specific queries](../taos-sql/distinguished), like downsampling, interpolation, cumulated sum, time weighted average, state window, session window and many others. + - Supports [User Defined Functions (UDF)](../taos-sql/udf). +3. [Caching](../develop/cache/): TDengine always saves the last data point in cache, so Redis is not needed for time-series data processing. +4. [Stream Processing](../develop/stream/): Not only is the continuous query is supported, but TDengine also supports event driven stream processing, so Flink or Spark is not needed for time-series data processing. +5. [Data Subscription](../develop/tmq/): Application can subscribe a table or a set of tables. API is the same as Kafka, but you can specify filter conditions. +6. Visualization + - Supports seamless integration with [Grafana](../third-party/grafana/) for visualization. + - Supports seamless integration with Google Data Studio. +7. Cluster + - Supports [cluster](../deployment/) with the capability of increasing processing power by adding more nodes. + - Supports [deployment on Kubernetes](../deployment/k8s/). + - Supports high availability via data replication. +8. Administration + - Provides [monitoring](../operation/monitor) on running instances of TDengine. + - Provides many ways to [import](../operation/import) and [export](../operation/export) data. +9. Tools + - Provides an interactive [Command-line Interface (CLI)](../reference/taos-shell) for management, maintenance and ad-hoc queries. + - Provides a tool [taosBenchmark](../reference/taosbenchmark/) for testing the performance of TDengine. +10. Programming + - Provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages. + - Provides a [REST API](../reference/rest-api/). + +For more details on features, please read through the entire documentation. ## Competitive Advantages -Time-series data is structured, not transactional, and is rarely deleted or updated. TDengine makes full use of [these characteristics of time series data](https://tdengine.com/2019/07/09/86.html) to build its own innovative storage engine and computing engine to differentiate itself from other time series databases, with the following advantages. +By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other time series databases, with the following advantages. -- **[High Performance](https://tdengine.com/fast)**: With an innovatively designed and purpose-built storage engine, TDengine outperforms other time series databases in data ingestion and querying while significantly reducing storage costs and compute costs. +- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression. -- **[Scalable](https://tdengine.com/scalable)**: TDengine provides out-of-box scalability and high-availability through its native distributed design. Nodes can be added through simple configuration to achieve greater data processing power. In addition, this feature is open source. +- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly. -- **[SQL Support](https://tdengine.com/sql-support)**: TDengine uses SQL as the query language, thereby reducing learning and migration costs, while adding SQL extensions to better handle time-series. Keeping NoSQL developers in mind, TDengine also supports convenient and flexible, schemaless data ingestion. +- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for Kubernetes deployment and full observability, TDengine is a cloud native Time-series Database and can be deployed on public, private or hybrid clouds. -- **All in One**: TDengine has built-in caching, stream processing and data subscription functions. It is no longer necessary to integrate Kafka/Redis/HBase/Spark or other software in some scenarios. It makes the system architecture much simpler, cost-effective and easier to maintain. +- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to[ + ](https://tdengine.com/tdengine/easy-time-series-data-platform/) deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access. -- **Seamless Integration**: Without a single line of code, TDengine provide seamless, configurable integration with third-party tools such as Telegraf, Grafana, EMQX, Prometheus, StatsD, collectd, etc. More third-party tools are being integrated. +- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way. -- **Zero Management**: Installation and cluster setup can be done in seconds. Data partitioning and sharding are executed automatically. TDengine’s running status can be monitored via Grafana or other DevOps tools. +- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide. -- **Zero Learning Costs**: With SQL as the query language and support for ubiquitous tools like Python, Java, C/C++, Go, Rust, and Node.js connectors, and a REST API, there are zero learning costs. +With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. -- **Interactive Console**: TDengine provides convenient console access to the database, through a CLI, to run ad hoc queries, maintain the database, or manage the cluster, without any programming. - -With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly 2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly 3: With its simple architecture and zero management, the operation and maintenance costs are reduced. +1. With its superior performance, the computing and storage resources are reduced significantly. +2. With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly. +3. With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly. ## Technical Ecosystem + This is how TDengine would be situated, in a typical time-series data processing platform: +

+ ![TDengine Database Technical Ecosystem ](eco_system.webp) -
Figure 1. TDengine Technical Ecosystem
+
Figure 1. TDengine Technical Ecosystem
+
On the left-hand side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right-hand side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides an interactive command-line interface and a web interface for management and maintenance. @@ -66,48 +83,47 @@ As a high-performance, scalable and SQL supported time-series database, TDengine ### Characteristics and Requirements of Data Sources -| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** | -| -------------------------------------------------------- | ------------------ | ----------------------- | ------------------- | :----------------------------------------------------------- | -| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with matching high compression ratio to achieve the best storage efficiency in the industry.| -| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. | -| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. | +| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** | +| ------------------------------------------------ | ------------------ | ----------------------- | ------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with matching high compression ratio to achieve the best storage efficiency in the industry. | +| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. | +| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. | ### System Architecture Requirements -| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** | -| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ | +| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** | +| ----------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | A simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions. There is no need to integrate any additional third-party products. | -| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. | -| Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. | +| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. | +| Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. | ### System Function Requirements -| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** | -| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ | -| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level.| -| A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. | +| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** | +| -------------------------------------------- | ------------------ | ----------------------- | ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level. | +| A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. | ### System Performance Requirements -| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** | -| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ | -| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. | -| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products.| -| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. | +| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** | +| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------- | +| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. | +| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products. | +| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. | ### System Maintenance Requirements -| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** | -| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ | -| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. | -| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the Taos shell for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs.| -| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine.| +| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** | +| --------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. | +| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the TDengine CLI for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs. | +| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine. | ## Comparison with other databases - [Writing Performance Comparison of TDengine and InfluxDB ](https://tdengine.com/2022/02/23/4975.html) - [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/2022/02/24/5120.html) -- [TDengine vs InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse](https://www.tdengine.com/downloads/TDengine_Testing_Report_en.pdf) - [TDengine vs OpenTSDB](https://tdengine.com/2019/09/12/710.html) - [TDengine vs Cassandra](https://tdengine.com/2019/09/12/708.html) - [TDengine vs InfluxDB](https://tdengine.com/2019/09/12/706.html) diff --git a/docs/en/04-concept/index.md b/docs/en/04-concept/index.md index 850f705146c4829db579f14be1a686ef9052f678..b0a0c25d85e99244858a461317ff54359d1ceff8 100644 --- a/docs/en/04-concept/index.md +++ b/docs/en/04-concept/index.md @@ -2,7 +2,7 @@ title: Concepts --- -In order to explain the basic concepts and provide some sample code, the TDengine documentation smart meters as a typical time series use case. We assume the following: 1. Each smart meter collects three metrics i.e. current, voltage, and phase 2. There are multiple smart meters, and 3. Each meter has static attributes like location and group ID. Based on this, collected data will look similar to the following table: +In order to explain the basic concepts and provide some sample code, the TDengine documentation smart meters as a typical time series use case. We assume the following: 1. Each smart meter collects three metrics i.e. current, voltage, and phase; 2. There are multiple smart meters; 3. Each meter has static attributes like location and group ID. Based on this, collected data will look similar to the following table:
@@ -104,19 +104,19 @@ Each row contains the device ID, time stamp, collected metrics (current, voltage ## Metric -Metric refers to the physical quantity collected by sensors, equipment or other types of data collection devices, such as current, voltage, temperature, pressure, GPS position, etc., which change with time, and the data type can be integer, float, Boolean, or strings. As time goes by, the amount of collected metric data stored increases. +Metric refers to the physical quantity collected by sensors, equipment or other types of data collection devices, such as current, voltage, temperature, pressure, GPS position, etc., which change with time, and the data type can be integer, float, Boolean, or strings. As time goes by, the amount of collected metric data stored increases. In the smart meters example, current, voltage and phase are the metrics. ## Label/Tag -Label/Tag refers to the static properties of sensors, equipment or other types of data collection devices, which do not change with time, such as device model, color, fixed location of the device, etc. The data type can be any type. Although static, TDengine allows users to add, delete or update tag values at any time. Unlike the collected metric data, the amount of tag data stored does not change over time. +Label/Tag refers to the static properties of sensors, equipment or other types of data collection devices, which do not change with time, such as device model, color, fixed location of the device, etc. The data type can be any type. Although static, TDengine allows users to add, delete or update tag values at any time. Unlike the collected metric data, the amount of tag data stored does not change over time. In the meters example, `location` and `groupid` are the tags. ## Data Collection Point -Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. +Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. In the smart meters example, d1001, d1002, d1003, and d1004 are the data collection points. ## Table -Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process them with a short learning curve. You need to create a database, create tables, then insert data points and execute queries to explore the data. +Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process them with a short learning curve. You need to create a database, create tables, then insert data points and execute queries to explore the data. To make full use of time-series data characteristics, TDengine adopts a strategy of "**One Table for One Data Collection Point**". TDengine requires the user to create a table for each data collection point (DCP) to store collected time-series data. For example, if there are over 10 million smart meters, it means 10 million tables should be created. For the table above, 4 tables should be created for devices D1001, D1002, D1003, and D1004 to store the data collected. This design has several benefits: @@ -125,25 +125,28 @@ To make full use of time-series data characteristics, TDengine adopts a strategy 3. The metric data from a DCP is continuously stored, block by block. If you read data for a period of time, it can greatly reduce random read operations and improve read and query performance by orders of magnitude. 4. Inside a data block for a DCP, columnar storage is used, and different compression algorithms are used for different data types. Metrics generally don't vary as significantly between themselves over a time range as compared to other metrics, which allows for a higher compression rate. -If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.** +If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. ** One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.** TDengine suggests using DCP ID as the table name (like D1001 in the above table). Each DCP may collect one or multiple metrics (like the current, voltage, phase as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored. Column wise storage is used. +Complex devices, such as connected cars, may have multiple DCPs. In this case, multiple tables are created for a single device, one table per DCP. + ## Super Table (STable) The design of one table for one data collection point will require a huge number of tables, which is difficult to manage. Furthermore, applications often need to take aggregation operations among DCPs, thus aggregation operations will become complicated. To support aggregation over multiple tables efficiently, the STable(Super Table) concept is introduced by TDengine. STable is a template for a type of data collection point. A STable contains a set of data collection points (tables) that have the same schema or data structure, but with different static attributes (tags). To describe a STable, in addition to defining the table structure of the metrics, it is also necessary to define the schema of its tags. The data type of tags can be int, float, string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established. -In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**. +In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**. In the smart meters example, we can create a super table named `meters`. ## Subtable -When creating a table for a specific data collection point, the user can use a STable as a template and specify the tag values of this specific DCP to create it. **The table created by using a STable as the template is called subtable** in TDengine. The difference between regular table and subtable is: +When creating a table for a specific data collection point, the user can use a STable as a template and specify the tag values of this specific DCP to create it. ** The table created by using a STable as the template is called subtable** in TDengine. The difference between regular table and subtable is: + 1. Subtable is a table, all SQL commands applied on a regular table can be applied on subtable. 2. Subtable is a table with extensions, it has static tags (labels), and these tags can be added, deleted, and updated after it is created. But a regular table does not have tags. 3. A subtable belongs to only one STable, but a STable may have many subtables. Regular tables do not belong to a STable. -4. A regular table can not be converted into a subtable, and vice versa. +4. A regular table can not be converted into a subtable, and vice versa. The relationship between a STable and the subtables created based on this STable is as follows: @@ -151,13 +154,15 @@ The relationship between a STable and the subtables created based on this STable 2. The schema of metrics or labels cannot be adjusted through subtables, and it can only be changed via STable. Changes to the schema of a STable takes effect immediately for all associated subtables. 3. STable defines only one template and does not store any data or label information by itself. Therefore, data cannot be written to a STable, only to subtables. -Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine will treat the data in all its subtables as a whole data set for processing. TDengine will first find the subtables that meet the tag filter conditions, then scan the time-series data of these subtables to perform aggregation operation, which reduces the number of data sets to be scanned which in turn greatly improves the performance of data aggregation across multiple DCPs. +Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine will treat the data in all its subtables as a whole data set for processing. TDengine will first find the subtables that meet the tag filter conditions, then scan the time-series data of these subtables to perform aggregation operation, which reduces the number of data sets to be scanned which in turn greatly improves the performance of data aggregation across multiple DCPs. In essence, querying a supertable is a very efficient aggregate query on multiple DCPs of the same type. + +In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. In the smart meters example, we can create subtables like d1001, d1002, d1003, and d1004 under super table meters. -In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. +To better understand the data model using metri, tags, super table and subtable, please refer to the diagram below which demonstrates the data model of the smart meters example. ![Meters Data Model Diagram](./supertable.webp) ## Database -A database is a collection of tables. TDengine allows a running instance to have multiple databases, and each database can be configured with different storage policies. Different types of DCPs often have different data characteristics, including the frequency of data collection, data retention time, the number of replications, the size of data blocks, whether data is allowed to be updated, and so on. In order for TDengine to work with maximum efficiency in various scenarios, TDengine recommends that STables with different data characteristics be created in different databases. +A database is a collection of tables. TDengine allows a running instance to have multiple databases, and each database can be configured with different storage policies. The [characteristics of time-series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For e.g. days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. In order for TDengine to work with maximum efficiency in various scenarios, TDengine recommends that STables with different data characteristics be created in different databases. In a database, there can be one or more STables, but a STable belongs to only one database. All tables owned by a STable are stored in only one database. @@ -167,4 +172,4 @@ FQDN (Fully Qualified Domain Name) is the full domain name of a specific compute Each node of a TDengine cluster is uniquely identified by an End Point, which consists of an FQDN and a Port, such as h1.tdengine.com:6030. In this way, when the IP changes, we can still use the FQDN to dynamically find the node without changing any configuration of the cluster. In addition, FQDN is used to facilitate unified access to the same cluster from the Intranet and the Internet. -TDengine does not recommend using an IP address to access the cluster. FQDN is recommended for cluster management. +TDengine does not recommend using an IP address to access the cluster. FQDN is recommended for cluster management. diff --git a/docs/en/04-concept/supertable.webp b/docs/en/04-concept/supertable.webp new file mode 100644 index 0000000000000000000000000000000000000000..764b8f3de7ee92a103b2fcd0e75c03773af5ee37 Binary files /dev/null and b/docs/en/04-concept/supertable.webp differ diff --git a/docs/en/05-get-started/01-docker.md b/docs/en/05-get-started/01-docker.md index 14f5a8800072971a2ffa8550c838212d7b6a9907..de5b620a779557a8a3b8422a14caf67b354d1e7a 100644 --- a/docs/en/05-get-started/01-docker.md +++ b/docs/en/05-get-started/01-docker.md @@ -1,103 +1,99 @@ --- sidebar_label: Docker -title: 通过 Docker 快速体验 TDengine +title: Quick Install on Docker --- -:::info -如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装. -::: -本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。 +This document describes how to install TDengine in a Docker container and perform queries and inserts. To get started with TDengine in a non-containerized environment, see [Quick Install](../../get-started/package). If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine). -## 启动 TDengine +## Run TDengine -如果已经安装了 docker, 只需执行下面的命令。 +If Docker is already installed on your computer, run the following command: ```shell -docker run -d -p 6030:6030 -p 6041/6041 -p 6043-6049/6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine +docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine ``` -注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。 +Note that TDengine Server uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed. -确定该容器已经启动并且在正常运行 +Run the following command to ensure that your container is running: ```shell docker ps ``` -进入该容器并执行 bash +Enter the container and open the bash shell: ```shell docker exec -it bash ``` -然后就可以执行相关的 Linux 命令操作和访问 TDengine +You can now access TDengine or run other Linux commands. -## 运行 TDengine CLI +Note: For information about installing docker, see the [official documentation](https://docs.docker.com/get-docker/). -进入容器,执行 taos +## Insert Data into TDengine -``` -$ taos -Welcome to the TDengine shell from Linux, Client Version:3.0.0.0 -Copyright (c) 2022 by TAOS Data, Inc. All rights reserved. +You can use the `taosBenchmark` tool included with TDengine to write test data into your deployment. -Server is Community Edition. +To do so, run the following command: -taos> + ```bash + $ taosBenchmark + + ``` -``` +This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`. -## 写入数据 + The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required depends on the hardware specifications of the local system. -可以使用 TDengine 的自带工具 taosBenchmark 快速体验 TDengine 的写入。 + You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](/reference/taosbenchmark). -进入容器,启动 taosBenchmark: +## Open the TDengine CLI - ```bash - $ taosBenchmark - - ``` +On the container, run the following command to open the TDengine CLI: - 该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "San Francisco" 或者 "Los Angeles"等城市名称。 +``` +$ taos - 这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能。 +taos> - taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照 [taosBenchmark 参考手册](../../reference/taosbenchmark)。 +``` -## 体验查询 +## Query Data in TDengine -使用上述 taosBenchmark 插入数据后,可以在 TDengine CLI 输入查询命令,体验查询速度。。 +After using taosBenchmark to create your test deployment, you can run queries in the TDengine CLI to test its performance. For example: -查询超级表下记录总条数: +From the TDengine CLI query the number of rows in the `meters` supertable: ```sql -taos> select count(*) from test.meters; +select count(*) from test.meters; ``` -查询 1 亿条记录的平均值、最大值、最小值等: +Query the average, maximum, and minimum values of all 100 million rows of data: ```sql -taos> select avg(current), max(voltage), min(phase) from test.meters; +select avg(current), max(voltage), min(phase) from test.meters; ``` -查询 location="San Francisco" 的记录总条数: +Query the number of rows whose `location` tag is `San Francisco`: ```sql -taos> select count(*) from test.meters where location="San Francisco"; +select count(*) from test.meters where location="San Francisco"; ``` -查询 groupId=10 的所有记录的平均值、最大值、最小值等: +Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`: ```sql -taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10; +select avg(current), max(voltage), min(phase) from test.meters where groupId=10; ``` -对表 d10 按 10s 进行平均值、最大值和最小值聚合统计: +Query the average, maximum, and minimum values for table `d10` in 1 second intervals: ```sql -taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); +select first(ts), avg(current), max(voltage), min(phase) from test.d10 interval(1s); ``` +In the query above you are selecting the first timestamp (ts) in the interval, another way of selecting this would be _wstart which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/). -## 其它 +## Additional Information -更多关于在 Docker 环境下使用 TDengine 的细节,请参考 [在 Docker 下使用 TDengine](../../reference/docker) +For more information about deploying TDengine in a Docker environment, see [Using TDengine in Docker](../../reference/docker). diff --git a/docs/en/05-get-started/03-package.md b/docs/en/05-get-started/03-package.md index 6423cc710523a0be95e95ba9e50556e332659dfd..88096a759c58529d4150c0a750a4354a88da988f 100644 --- a/docs/en/05-get-started/03-package.md +++ b/docs/en/05-get-started/03-package.md @@ -1,160 +1,208 @@ --- -sidebar_label: 安装包 -title: 使用安装包立即开始 +sidebar_label: Package +title: Quick Install from Package --- import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; +import PkgListV3 from "/components/PkgListV3"; -:::info -如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装. +For information about installing TDengine on Docker, see [Quick Install on Docker](../../get-started/docker). If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine). -::: +The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. Note that taosAdapter supports Linux only. In addition to connectors for multiple languages, TDengine also provides a [REST API](../../reference/rest-api) through [taosAdapter](../../reference/taosadapter). + +The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download a lite package that includes only `taosd` and the C/C++ connector. -TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。 +The TDengine Community Edition is released as .deb and .rpm packages. The .deb package can be installed on Debian, Ubuntu, and derivative systems. The .rpm package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.tz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the .deb or .rpm package, download and install taosTools separately. TDengine can also be installed on 64-bit Windows servers. -## 安装 +## Installation - -可以使用 apt-get 工具从官方仓库安装。 + -**安装包仓库** +1. Download the .deb installation package. + +2. In the directory where the package is located, use `dpkg` to install the package: ```bash -wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add - -echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list +# Enter the name of the package that you downloaded. +sudo dpkg -i TDengine-server--Linux-x64.deb ``` -如果安装 Beta 版需要安装包仓库 + -```bash -echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list -``` + -**使用 apt-get 命令安装** +1. Download the .rpm installation package. + +2. In the directory where the package is located, use rpm to install the package: ```bash -sudo apt-get update -apt-cache policy tdengine -sudo apt-get install tdengine +# Enter the name of the package that you downloaded. +sudo rpm -ivh TDengine-server--Linux-x64.rpm ``` -:::tip -apt-get 方式只适用于 Debian 或 Ubuntu 系统 -:::: - -1、从官网下载获得 deb 安装包,例如 TDengine-server-3.0.0.0-Linux-x64.deb; -2、进入到 TDengine-server-3.0.0.0-Linux-x64.deb 安装包所在目录,执行如下的安装命令: + + +1. Download the .tar.gz installation package. + +2. In the directory where the package is located, use `tar` to decompress the package: ```bash -sudo dpkg -i TDengine-server-3.0.0.0-Linux-x64.deb +# Enter the name of the package that you downloaded. +tar -zxvf TDengine-server--Linux-x64.tar.gz ``` - - - - -1、从官网下载获得 rpm 安装包,例如 TDengine-server-3.0.0.0-Linux-x64.rpm; -2、进入到 TDengine-server-3.0.0.0-Linux-x64.rpm 安装包所在目录,执行如下的安装命令: +In the directory to which the package was decompressed, run `install.sh`: ```bash -sudo rpm -ivh TDengine-server-3.0.0.0-Linux-x64.rpm +sudo ./install.sh ``` +:::info +Users will be prompted to enter some configuration information when install.sh is executing. The interactive mode can be disabled by executing `./install.sh -e no`. `./install.sh -h` can show all parameters with detailed explanation. +::: + - + +You can use `apt-get` to install TDengine from the official package repository. -1、从官网下载获得 tar.gz 安装包,例如 TDengine-server-3.0.0.0-Linux-x64.tar.gz; -2、进入到 TDengine-server-3.0.0.0-Linux-x64.tar.gz 安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本: +**Configure the package repository** ```bash -tar -zxvf TDengine-server-3.0.0.0-Linux-x64.tar.gz +wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add - +echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list ``` -解压后进入相应路径,执行 +You can install beta versions by configuring the following repository: ```bash -sudo ./install.sh +wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add - +echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list ``` -:::info -install.sh 安装脚本在执行过程中,会通过命令行交互界面询问一些配置信息。如果希望采取无交互安装方式,那么可以用 -e no 参数来执行 install.sh 脚本。运行 `./install.sh -h` 指令可以查看所有参数的详细说明信息。 +**Install TDengine with `apt-get`** -::: +```bash +sudo apt-get update +apt-cache policy tdengine +sudo apt-get install tdengine +``` + +:::tip +This installation method is supported only for Debian and Ubuntu. +:::: + + + +Note: TDengine only supports Windows Server 2016/2019 and windows 10/11 system versions on the windows platform. + +1. Download the Windows installation package. + +2. Run the downloaded package to install TDengine. +:::info +For information about TDengine releases, see [Release History](../../releases). +::: + :::note -当安装第一个节点时,出现 Enter FQDN:提示的时候,不需要输入任何内容。只有当安装第二个或以后更多的节点时,才需要输入已有集群中任何一个可用节点的 FQDN,支持该新节点加入集群。当然也可以不输入,而是在新节点启动前,配置到新节点的配置文件中。 +On the first node in your TDengine cluster, leave the `Enter FQDN:` prompt blank and press **Enter**. On subsequent nodes, you can enter the end point of the first dnode in the cluster. You can also configure this setting after you have finished installing TDengine. ::: -## 启动 +## Quick Launch + + + -安装后,请使用 `systemctl` 命令来启动 TDengine 的服务进程。 +After the installation is complete, run the following command to start the TDengine service: ```bash systemctl start taosd ``` -检查服务是否正常工作: +Run the following command to confirm that TDengine is running normally: ```bash systemctl status taosd ``` -如果服务进程处于活动状态,则 status 指令会显示如下的相关信息: +Output similar to the following indicates that TDengine is running normally: ``` Active: active (running) ``` -如果后台服务进程处于停止状态,则 status 指令会显示如下的相关信息: +Output similar to the following indicates that TDengine has not started successfully: ``` Active: inactive (dead) ``` -如果 TDengine 服务正常工作,那么您可以通过 TDengine 的命令行程序 `taos` 来访问并体验 TDengine。 +After confirming that TDengine is running, run the `taos` command to access the TDengine CLI. -systemctl 命令汇总: +The following `systemctl` commands can help you manage TDengine: -- 启动服务进程:`systemctl start taosd` +- Start TDengine Server: `systemctl start taosd` -- 停止服务进程:`systemctl stop taosd` +- Stop TDengine Server: `systemctl stop taosd` -- 重启服务进程:`systemctl restart taosd` +- Restart TDengine Server: `systemctl restart taosd` -- 查看服务状态:`systemctl status taosd` +- Check TDengine Server status: `systemctl status taosd` :::info -- systemctl 命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 sudo 。 -- `systemctl stop taosd` 指令在执行后并不会马上停止 TDengine 服务,而是会等待系统中必要的落盘工作正常完成。在数据量很大的情况下,这可能会消耗较长时间。 -- 如果系统中不支持 `systemd`,也可以用手动运行 `/usr/local/taos/bin/taosd` 方式启动 TDengine 服务。 +- The `systemctl` command requires _root_ privileges. If you are not logged in as the `root` user, use the `sudo` command. +- The `systemctl stop taosd` command does not instantly stop TDengine Server. The server is stopped only after all data in memory is flushed to disk. The time required depends on the cache size. +- If your system does not include `systemd`, you can run `/usr/local/taos/bin/taosd` to start TDengine manually. ::: -## TDengine 命令行 (CLI) + + + + +After the installation is complete, run `C:\TDengine\taosd.exe` to start TDengine Server. + + + + +## Test data insert performance + +After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance: -为便于检查 TDengine 的状态,执行数据库 (Database) 的各种即席(Ad Hoc)查询,TDengine 提供一命令行应用程序(以下简称为 TDengine CLI) taos。要进入 TDengine 命令行,您只要在安装有 TDengine 的 Linux 终端执行 `taos` 即可。 +```bash +taosBenchmark +``` + +This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`. + +The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in less than a minute. + +You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark). + +## Command Line Interface + +You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, run the following command: ```bash taos ``` -如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考 [FAQ](/train-faq/faq) 来解决终端连接服务端失败的问题)。 TDengine CLI 的提示符号如下: +The TDengine CLI displays a welcome message and version information to indicate that its connection to the TDengine service was successful. If an error message is displayed, see the [FAQ](/train-faq/faq) for troubleshooting information. At the following prompt, you can execute SQL commands. ```cmd taos> ``` -在 TDengine CLI 中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行数据库(database)插入查询操作。在终端中运行的 SQL 语句需要以分号结束来运行。示例: +For example, you can create and delete databases and tables and run all types of queries. Each SQL command must be end with a semicolon (;). For example: ```sql create database demo; @@ -170,52 +218,39 @@ select * from t; Query OK, 2 row(s) in set (0.003128s) ``` -除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在 Linux 或 Windows 机器上运行,更多细节请参考 [这里](../../reference/taos-shell/) - -## 使用 taosBenchmark 体验写入速度 - -启动 TDengine 的服务,在 Linux 终端执行 `taosBenchmark` (曾命名为 `taosdemo`): - -```bash -taosBenchmark -``` - -该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.LosAngeles"。 - -这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。 - -taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照 [如何使用 taosBenchmark 对 TDengine 进行性能测试](https://www.taosdata.com/2021/10/09/3111.html)。 - -## 使用 TDengine CLI 体验查询速度 +You can also can monitor the deployment status, add and remove user accounts, and manage running instances. You can run the TDengine CLI on either Linux or Windows machines. For more information, see [TDengine CLI](../../reference/taos-shell/). + +## Test data query performance -使用上述 taosBenchmark 插入数据后,可以在 TDengine CLI 输入查询命令,体验查询速度。 +After using taosBenchmark to create your test deployment, you can run queries in the TDengine CLI to test its performance: -查询超级表下记录总条数: +From the TDengine CLI query the number of rows in the `meters` supertable: ```sql -taos> select count(*) from test.meters; +select count(*) from test.meters; ``` -查询 1 亿条记录的平均值、最大值、最小值等: +Query the average, maximum, and minimum values of all 100 million rows of data: ```sql -taos> select avg(current), max(voltage), min(phase) from test.meters; +select avg(current), max(voltage), min(phase) from test.meters; ``` -查询 location="California.SanFrancisco" 的记录总条数: +Query the number of rows whose `location` tag is `San Francisco`: ```sql -taos> select count(*) from test.meters where location="California.SanFrancisco"; +select count(*) from test.meters where location="San Francisco"; ``` -查询 groupId=10 的所有记录的平均值、最大值、最小值等: +Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`: ```sql -taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10; +select avg(current), max(voltage), min(phase) from test.meters where groupId=10; ``` -对表 d10 按 10s 进行平均值、最大值和最小值聚合统计: +Query the average, maximum, and minimum values for table `d10` in 1 second intervals: ```sql -taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); +select first(ts), avg(current), max(voltage), min(phase) from test.d10 interval(1s); ``` +In the query above you are selecting the first timestamp (ts) in the interval, another way of selecting this would be _wstart which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/). diff --git a/docs/en/05-get-started/_apt_get_install.mdx b/docs/en/05-get-started/_apt_get_install.mdx index b1bc4a13517bbfdc9eda86a58b89aee8e41fa470..acbcf22122152f970891ca92c20b9b1b7d97fe1a 100644 --- a/docs/en/05-get-started/_apt_get_install.mdx +++ b/docs/en/05-get-started/_apt_get_install.mdx @@ -1,19 +1,19 @@ -可以使用 apt-get 工具从官方仓库安装。 +You can use `apt-get` to install TDengine from the official package repository. -**安装包仓库** +**Configure the package repository** ``` wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add - echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list ``` -如果安装 Beta 版需要安装包仓库 +You can install beta versions by configuring the following package repository: ``` echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list ``` -**使用 apt-get 命令安装** +**Install TDengine with `apt-get`** ``` sudo apt-get update @@ -22,5 +22,5 @@ sudo apt-get install tdengine ``` :::tip -apt-get 方式只适用于 Debian 或 Ubuntu 系统 +This installation method is supported only for Debian and Ubuntu. :::: diff --git a/docs/en/05-get-started/_category_.yml b/docs/en/05-get-started/_category_.yml index b2348fade63c7bb717eac3e6e6b8dfda3c73b17a..043ae21554ffd8f274c6afe41c5ae5e7da742b26 100644 --- a/docs/en/05-get-started/_category_.yml +++ b/docs/en/05-get-started/_category_.yml @@ -1 +1 @@ -label: 立即开始 +label: Get Started diff --git a/docs/en/05-get-started/_pkg_install.mdx b/docs/en/05-get-started/_pkg_install.mdx index 83c987af8bcf24a9593105b680d32a0421344d5f..32d7c1f376bc6404c45db39fbaa24a5012a11f01 100644 --- a/docs/en/05-get-started/_pkg_install.mdx +++ b/docs/en/05-get-started/_pkg_install.mdx @@ -1,17 +1,17 @@ import PkgList from "/components/PkgList"; -TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟。 +TDengine is easy to download and install. -为方便使用,从 2.4.0.10 开始,标准的服务端安装包包含了 taos、taosd、taosAdapter、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。 +The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download a lite package that includes only `taosd` and the C/C++ connector. -在安装包格式上,我们提供 tar.gz, rpm 和 deb 格式,为企业客户提供 tar.gz 格式安装包,以方便在特定操作系统上使用。需要注意的是,rpm 和 deb 包不含 taosdump、taosBenchmark 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。 +You can download the TDengine installation package in .rpm, .deb, or .tar.gz format. The .tar.tz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the .deb or .rpm package, download and install taosTools separately. -发布版本包括稳定版和 Beta 版,Beta 版含有更多新功能。正式上线或测试建议安装稳定版。您可以根据需要选择下载: +Between official releases, beta versions may be released that contain new features. Do not use beta versions for production or testing environments. Select the installation package appropriate for your system. -具体的安装方法,请参见[安装包的安装和卸载](/operation/pkg-install)。 +For information about installing TDengine, see [Install and Uninstall](/operation/pkg-install). -下载其他组件、最新 Beta 版及之前版本的安装包,请点击[这里](https://www.taosdata.com/all-downloads) +For information about TDengine releases, see [All Downloads](https://tdengine.com/all-downloads) -查看 Release Notes, 请点击[这里](https://github.com/taosdata/TDengine/releases) +and [Release Notes](https://github.com/taosdata/TDengine/releases). diff --git a/docs/en/05-get-started/index.md b/docs/en/05-get-started/index.md index 794081b4e4c438dee2d8cbe125de4094056f190f..cf475a8cd79e15880a4356a89f46c0dd6a8c307d 100644 --- a/docs/en/05-get-started/index.md +++ b/docs/en/05-get-started/index.md @@ -1,11 +1,11 @@ --- -title: 立即开始 -description: '快速设置 TDengine 环境并体验其高效写入和查询' +title: Get Started +description: This article describes how to install TDengine and test its performance. --- -TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](/reference/taosadapter) 提供 [RESTful 接口](/reference/rest-api)。 +The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter). -本章主要介绍如何利用 Docker 或者安装包快速设置 TDengine 环境并体验其高效写入和查询。 +You can install and run TDengine on Linux and Windows machines as well as Docker containers. ```mdx-code-block import DocCardList from '@theme/DocCardList'; diff --git a/docs/en/07-develop/01-connect/_connect_php.mdx b/docs/en/07-develop/01-connect/_connect_php.mdx new file mode 100644 index 0000000000000000000000000000000000000000..dbad72bc1988bd5336f1da132dd9e6ba9b8020e6 --- /dev/null +++ b/docs/en/07-develop/01-connect/_connect_php.mdx @@ -0,0 +1,3 @@ +```php title="原生连接" +{{#include docs/examples/php/connect.php}} +``` diff --git a/docs/en/07-develop/01-connect/index.md b/docs/en/07-develop/01-connect/index.md index 1318f4619ba5ff1da23990f881a15f3726401760..901fe69d24640e1cfab5ed4cbc3875ac240e493b 100644 --- a/docs/en/07-develop/01-connect/index.md +++ b/docs/en/07-develop/01-connect/index.md @@ -1,38 +1,39 @@ --- -sidebar_label: Connect title: Connect -description: "This document explains how to establish connections to TDengine, and briefly introduces how to install and use TDengine connectors." +description: "This document explains how to establish connections to TDengine and how to install and use TDengine connectors." --- import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -import ConnJava from "./\_connect_java.mdx"; -import ConnGo from "./\_connect_go.mdx"; -import ConnRust from "./\_connect_rust.mdx"; -import ConnNode from "./\_connect_node.mdx"; -import ConnPythonNative from "./\_connect_python.mdx"; -import ConnCSNative from "./\_connect_cs.mdx"; -import ConnC from "./\_connect_c.mdx"; -import ConnR from "./\_connect_r.mdx"; -import InstallOnWindows from "../../14-reference/03-connector/\_linux_install.mdx"; -import InstallOnLinux from "../../14-reference/03-connector/\_windows_install.mdx"; -import VerifyLinux from "../../14-reference/03-connector/\_verify_linux.mdx"; -import VerifyWindows from "../../14-reference/03-connector/\_verify_windows.mdx"; - -Any application programs running on any kind of platform can access TDengine through the REST API provided by TDengine. For details, please refer to [REST API](/reference/rest-api/). Additionally, application programs can use the connectors of multiple programming languages including C/C++, Java, Python, Go, Node.js, C#, Rust to access TDengine. This chapter describes how to establish a connection to TDengine and briefly introduces how to install and use connectors. TDengine community also provides connectors in LUA and PHP languages. For details about the connectors, please refer to [Connectors](/reference/connector/). +import ConnJava from "./_connect_java.mdx"; +import ConnGo from "./_connect_go.mdx"; +import ConnRust from "./_connect_rust.mdx"; +import ConnNode from "./_connect_node.mdx"; +import ConnPythonNative from "./_connect_python.mdx"; +import ConnCSNative from "./_connect_cs.mdx"; +import ConnC from "./_connect_c.mdx"; +import ConnR from "./_connect_r.mdx"; +import ConnPHP from "./_connect_php.mdx"; +import InstallOnWindows from "../../14-reference/03-connector/_linux_install.mdx"; +import InstallOnLinux from "../../14-reference/03-connector/_windows_install.mdx"; +import VerifyLinux from "../../14-reference/03-connector/_verify_linux.mdx"; +import VerifyWindows from "../../14-reference/03-connector/_verify_windows.mdx"; + +Any application running on any platform can access TDengine through the REST API provided by TDengine. For information, see [REST API](/reference/rest-api/). Applications can also use the connectors for various programming languages, including C/C++, Java, Python, Go, Node.js, C#, and Rust, to access TDengine. These connectors support connecting to TDengine clusters using both native interfaces (taosc). Some connectors also support connecting over a REST interface. Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector. ## Establish Connection There are two ways for a connector to establish connections to TDengine: -1. Connection through the REST API provided by the taosAdapter component, this way is called "REST connection" hereinafter. -2. Connection through the TDengine client driver (taosc), this way is called "Native connection" hereinafter. +1. REST connection through the REST API provided by the taosAdapter component. +2. Native connection through the TDengine client driver (taosc). + +For REST and native connections, connectors provide similar APIs for performing operations and running SQL statements on your databases. The main difference is the method of establishing the connection, which is not visible to users. Key differences: -1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc. -2. The TDengine client driver (taosc) is not supported across all platforms, and applications built on taosc may need to be modified when updating taosc to newer versions. 3. The REST connection is more accessible with cross-platform support, however it results in a 30% performance downgrade. +1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc. ## Install Client Driver taosc @@ -136,19 +137,19 @@ Node.js connector provides different ways of establishing connections by providi 1. Install Node.js Native Connector -``` -npm install @tdengine/client -``` + ``` + npm install @tdengine/client + ``` :::note It's recommend to use Node whose version is between `node-v12.8.0` and `node-v13.0.0`. -::: - +::: + 2. Install Node.js REST Connector -``` -npm install @tdengine/rest -``` + ``` + npm install @tdengine/rest + ``` @@ -222,7 +223,7 @@ phpize && ./configure && make -j && make install **Specify TDengine Location:** ```shell -phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install +phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install ``` > `--with-tdengine-dir=` is followed by the TDengine installation location. @@ -236,14 +237,14 @@ phpize && ./configure --enable-swoole && make -j && make install **Enable The Extension:** -Option One: Add `extension=tdengine` in `php.ini` +Option One: Add `extension=tdengine` in `php.ini` Option Two: Specify the extension on CLI `php -d extension=tdengine test.php` -## Establish Connection +## Establish a connection Prior to establishing connection, please make sure TDengine is already running and accessible. The following sample code assumes TDengine is running on the same host as the client program, with FQDN configured to "localhost" and serverPort configured to "6030". @@ -272,9 +273,12 @@ Prior to establishing connection, please make sure TDengine is already running a + + + :::tip -If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.taosdata.com/train-faq/faq). +If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.tdengine.com/train-faq/faq). ::: diff --git a/docs/en/07-develop/02-model/index.mdx b/docs/en/07-develop/02-model/index.mdx index b647c845d070c26398956f8a9de81864b73120e1..3c16ed2df15940f3f59232a37b6f559010305a96 100644 --- a/docs/en/07-develop/02-model/index.mdx +++ b/docs/en/07-develop/02-model/index.mdx @@ -2,31 +2,36 @@ title: Data Model --- -The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the STable (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details. +The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the [STable](/concept/#super-table-stable) (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details. + +Note: before you read this chapter, please make sure you have already read through [Key Concepts](/concept/), since TDengine introduces new concepts like "one table for one [data collection point](/concept/#data-collection-point)" and "[super table](/concept/#super-table-stable)". + + ## Create Database -The [characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/86.html) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For e.g. days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. For TDengine to operate with the best performance, we strongly recommend that you create and configure different databases for data with different characteristics. This allows you, for example, to set up different storage and retention policies. When creating a database, there are a lot of parameters that can be configured such as, the days to keep data, the number of replicas, the number of memory blocks, time precision, the minimum and maximum number of rows in each data block, whether compression is enabled, the time range of the data in single data file and so on. Below is an example of the SQL statement to create a database. +The characteristics of time-series data from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For e.g. days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. For TDengine to operate with the best performance, we strongly recommend that you create and configure different databases for data with different characteristics. This allows you, for example, to set up different storage and retention policies. When creating a database, there are a lot of parameters that can be configured such as, the days to keep data, the number of replicas, the size of the cache, time precision, the minimum and maximum number of rows in each data block, whether compression is enabled, the time range of the data in single data file and so on. An example is shown as follows: ```sql -CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 VGROUPS 100 WAL 1; +CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 WAL_LEVEL 1; ``` In the above SQL statement: -- a database named "power" will be created -- the data in it will be kept for 365 days, which means that data older than 365 days will be deleted automatically +- a database named "power" is created +- the data in it is retained for 365 days, which means that data older than 365 days will be deleted automatically - a new data file will be created every 10 days -- the size of memory cache for writing is 16 MB -- data will be firstly written to WAL without FSYNC - -For more details please refer to [Database](/taos-sql/database). +- the size of the write cache pool on each vnode is 16 MB +- the number of vgroups is 100 +- WAL is enabled but fsync is disabled For more details please refer to [Database](/taos-sql/database). -After creating a database, the current database in use can be switched using SQL command `USE`. For example the SQL statement below switches the current database to `power`. Without the current database specified, table name must be preceded with the corresponding database name. +After creating a database, the current database in use can be switched using SQL command `USE`. For example the SQL statement below switches the current database to `power`. ```sql USE power; ``` +Without the current database specified, table name must be preceded with the corresponding database name. + :::note - Any table or STable must belong to a database. To create a table or STable, the database it belongs to must be ready. @@ -39,14 +44,9 @@ USE power; In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](/concept/#model_table1), the SQL statement below can be used to create the super table. ```sql -CREATE STable meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int); +CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int); ``` -:::note -If you are using versions prior to 2.0.15, the `STable` keyword needs to be replaced with `TABLE`. - -::: - Similar to creating a regular table, when creating a STable, the name and schema need to be provided. In the STable schema, the first column must always be a timestamp (like ts in the example), and the other columns (like current, voltage and phase in the example) are the data collected. The remaining columns can [contain data of type](/taos-sql/data-type/) integer, float, double, string etc. In addition, the schema for tags, like location and groupId in the example, must be provided. The tag type can be integer, float, string, etc. Tags are essentially the static properties of a data collection point. For example, properties like the location, device type, device group ID, manager ID are tags. Tags in the schema can be added, removed or updated. Please refer to [STable](/taos-sql/stable) for more details. For each kind of data collection point, a corresponding STable must be created. There may be many STables in an application. For electrical power system, we need to create a STable respectively for meters, transformers, busbars, switches. There may be multiple kinds of data collection points on a single device, for example there may be one data collection point for electrical data like current and voltage and another data collection point for environmental data like temperature, humidity and wind direction. Multiple STables are required for these kinds of devices. @@ -63,13 +63,8 @@ CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2); In the above SQL statement, "d1001" is the table name, "meters" is the STable name, followed by the value of tag "Location" and the value of tag "groupId", which are "California.SanFrancisco" and "2" respectively in the example. The tag values can be updated after the table is created. Please refer to [Tables](/taos-sql/table) for details. -In the TDengine system, it's recommended to create a table for a data collection point via STable. A table created via STable is called subtable in some parts of the TDengine documentation. All SQL commands applied on regular tables can be applied on subtables. - -:::tip It's suggested to use the globally unique ID of a data collection point as the table name. For example the device serial number could be used as a unique ID. If a unique ID doesn't exist, multiple IDs that are not globally unique can be combined to form a globally unique ID. It's not recommended to use a globally unique ID as tag value. -::: - ## Create Table Automatically In some circumstances, it's unknown whether the table already exists when inserting rows. The table can be created automatically using the SQL statement below, and nothing will happen if the table already exists. @@ -84,8 +79,6 @@ For more details please refer to [Create Table Automatically](/taos-sql/insert#a ## Single Column vs Multiple Column -A multiple columns data model is supported in TDengine. As long as multiple metrics are collected by the same data collection point at the same time, i.e. the timestamps are identical, these metrics can be put in a single STable as columns. - -However, there is another kind of design, i.e. single column data model in which a table is created for each metric. This means that a STable is required for each kind of metric. For example in a single column model, 3 STables would be required for current, voltage and phase. +A multiple columns data model is supported in TDengine. As long as multiple metrics are collected by the same data collection point at the same time, i.e. the timestamps are identical, these metrics can be put in a single STable as columns. However, there is another kind of design, i.e. single column data model in which a table is created for each metric. This means that a STable is required for each kind of metric. For example in a single column model, 3 STables would be required for current, voltage and phase. It's recommended to use a multiple column data model as much as possible because insert and query performance is higher. In some cases, however, the collected metrics may vary frequently and so the corresponding STable schema needs to be changed frequently too. In such cases, it's more convenient to use single column data model. diff --git a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx index da0e87abbc2c83ca940dd596ffbf5746a6b65823..f2168645ff9e59d60e88c85f86e890945b9f336c 100644 --- a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx +++ b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx @@ -1,5 +1,4 @@ --- -sidebar_label: Insert Using SQL title: Insert Using SQL --- @@ -19,13 +18,14 @@ import CsSQL from "./_cs_sql.mdx"; import CsStmt from "./_cs_stmt.mdx"; import CSQL from "./_c_sql.mdx"; import CStmt from "./_c_stmt.mdx"; +import PhpSQL from "./_php_sql.mdx"; +import PhpStmt from "./_php_stmt.mdx"; ## Introduction Application programs can execute `INSERT` statement through connectors to insert rows. The TDengine CLI can also be used to manually insert data. ### Insert Single Row - The below SQL statement is used to insert one row into table "d1001". ```sql @@ -42,7 +42,7 @@ INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, ### Insert into Multiple Tables -Data can be inserted into multiple tables in single SQL statement. The example below inserts 2 rows into table "d1001" and 1 row into table "d1002". +Data can be inserted into multiple tables in the same SQL statement. The example below inserts 2 rows into table "d1001" and 1 row into table "d1002". ```sql INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31); @@ -52,19 +52,19 @@ For more details about `INSERT` please refer to [INSERT](/taos-sql/insert). :::info -- Inserting in batches can improve performance. Normally, the higher the batch size, the better the performance. Please note that a single row can't exceed 48 KB bytes and each SQL statement can't exceed 1 MB. -- Inserting with multiple threads can also improve performance. However, depending on the system resources on the application side and the server side, when the number of inserting threads grows beyond a specific point the performance may drop instead of improving. The proper number of threads needs to be tested in a specific environment to find the best number. The proper number of threads may be impacted by the system resources on the server side, the system resources on the client side, the table schemas, etc. +- Inserting in batches can improve performance. The higher the batch size, the better the performance. Please note that a single row can't exceed 48K bytes and each SQL statement can't exceed 1MB. +- Inserting with multiple threads can also improve performance. However, at a certain point, increasing the number of threads no longer offers any benefit and can even decrease performance due to the overhead involved in frequent thread switching. The optimal number of threads for a system depends on the processing capabilities and configuration of the server, the configuration of the database, the data schema, and the batch size for writing data. In general, more powerful clients and servers can support higher numbers of concurrently writing threads. Given a sufficiently powerful server, a higher number of vgroups for a database also increases the number of concurrent writes. Finally, a simpler data schema enables more concurrent writes as well. ::: :::warning -- If the timestamp for the row to be inserted already exists in the table, the old data will be overritten by the new values for the columns for which new values are provided, columns for which no new values are provided are not impacted. -- The timestamp to be inserted must be newer than the timestamp of subtracting current time by the parameter `KEEP`. If `KEEP` is set to 3650 days, then the data older than 3650 days ago can't be inserted. The timestamp to be inserted can't be newer than the timestamp of current time plus parameter `DURATION`. If `DAYS` is set to 2, the data newer than 2 days later can't be inserted. +- If the timestamp of a new record already exists in a table, columns with new data for that timestamp replace old data with new data, while columns without new data are not affected. +- The timestamp to be inserted must be newer than the timestamp of subtracting current time by the parameter `KEEP`. If `KEEP` is set to 3650 days, then the data older than 3650 days ago can't be inserted. The timestamp to be inserted cannot be newer than the timestamp of current time plus parameter `DURATION`. If `DURATION` is set to 2, the data newer than 2 days later can't be inserted. ::: -## Examples +## Sample program ### Insert Using SQL @@ -90,6 +90,9 @@ For more details about `INSERT` please refer to [INSERT](/taos-sql/insert). + + + :::note @@ -101,7 +104,7 @@ For more details about `INSERT` please refer to [INSERT](/taos-sql/insert). ### Insert with Parameter Binding -TDengine also provides API support for parameter binding. Similar to MySQL, only `?` can be used in these APIs to represent the parameters to bind. Parameter binding support for inserting data has improved significantly to improve the insert performance by avoiding the cost of parsing SQL statements. +TDengine also provides API support for parameter binding. Similar to MySQL, only `?` can be used in these APIs to represent the parameters to bind. This avoids the resource consumption of SQL syntax parsing when writing data through the parameter binding interface, thus significantly improving write performance in most cases. Parameter binding is available only with native connection. @@ -127,4 +130,8 @@ Parameter binding is available only with native connection. + + + + diff --git a/docs/en/07-develop/03-insert-data/02-influxdb-line.mdx b/docs/en/07-develop/03-insert-data/02-influxdb-line.mdx index 41109937053c31f0a141fcc90016397863152e57..11db3daeb054b2cac29c6a0ccde2add27774f3da 100644 --- a/docs/en/07-develop/03-insert-data/02-influxdb-line.mdx +++ b/docs/en/07-develop/03-insert-data/02-influxdb-line.mdx @@ -21,15 +21,15 @@ In the InfluxDB Line protocol format, a single line of text is used to represent measurement,tag_set field_set timestamp ``` -- `measurement` will be used as the name of the STable -- `tag_set` will be used as tags, with format like `=,=` -- `field_set`will be used as data columns, with format like `=,=` +- `measurement` will be used as the name of the STable Enter a comma (,) between `measurement` and `tag_set`. +- `tag_set` will be used as tags, with format like `=,=` Enter a space between `tag_set` and `field_set`. +- `field_set`will be used as data columns, with format like `=,=` Enter a space between `field_set` and `timestamp`. - `timestamp` is the primary key timestamp corresponding to this row of data For example: ``` -meters,location=California.LoSangeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500 +meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500 ``` :::note @@ -42,7 +42,6 @@ meters,location=California.LoSangeles,groupid=2 current=13.4,voltage=223,phase=0 For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol) - ## Examples diff --git a/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx b/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx index 81e1169489d4188e14f4c5338ca322041bba80fb..db9bfd73649709cf806ae6499513191db8321107 100644 --- a/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx +++ b/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx @@ -17,19 +17,19 @@ import CTelnet from "./_c_opts_telnet.mdx"; A single line of text is used in OpenTSDB line protocol to represent one row of data. OpenTSDB employs a single column data model, so each line can only contain a single data column. There can be multiple tags. Each line contains 4 parts as below: -``` +```txt =[ =] ``` - `metric` will be used as the STable name. - `timestamp` is the timestamp of current row of data. The time precision will be determined automatically based on the length of the timestamp. Second and millisecond time precision are supported. -- `value` is a metric which must be a numeric value, the corresponding column name is "_value". +- `value` is a metric which must be a numeric value, The corresponding column name is "value". - The last part is the tag set separated by spaces, all tags will be converted to nchar type automatically. For example: ```txt -meters.current 1648432611250 11.3 location=California.LoSangeles groupid=3 +meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3 ``` Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details. @@ -63,7 +63,7 @@ Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_te taos> use test; Database changed. -taos> show STables; +taos> show stables; name | created_time | columns | tags | tables | ============================================================================================ meters.current | 2022-03-30 17:04:10.877 | 2 | 2 | 2 | @@ -73,8 +73,8 @@ Query OK, 2 row(s) in set (0.002544s) taos> select tbname, * from `meters.current`; tbname | _ts | _value | groupid | location | ================================================================================================================================== - t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LoSangeles | - t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | California.LoSangeles | + t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LosAngeles | + t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | California.LosAngeles | t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | California.SanFrancisco | t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco | Query OK, 4 row(s) in set (0.005399s) diff --git a/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx b/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx index aad94c3d913a128b344757162c231affc6a64651..23703f4087483373a15e9cf7604bb67ca62888f5 100644 --- a/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx +++ b/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx @@ -15,37 +15,37 @@ import CJson from "./_c_opts_json.mdx"; ## Introduction -A JSON string is used in OpenTSDB JSON to represent one or more rows of data, for example: +A JSON string is used in OpenTSDB JSON to represent one or more rows of data, for example: For example: ```json [ - { - "metric": "sys.cpu.nice", - "timestamp": 1346846400, - "value": 18, - "tags": { - "host": "web01", - "dc": "lga" - } - }, - { - "metric": "sys.cpu.nice", - "timestamp": 1346846400, - "value": 9, - "tags": { - "host": "web02", - "dc": "lga" - } + { + "metric": "sys.cpu.nice", + "timestamp": 1346846400, + "value": 18, + "tags": { + "host": "web01", + "dc": "lga" } + }, + { + "metric": "sys.cpu.nice", + "timestamp": 1346846400, + "value": 9, + "tags": { + "host": "web02", + "dc": "lga" + } + } ] ``` Similar to OpenTSDB line protocol, `metric` will be used as the STable name, `timestamp` is the timestamp to be used, `value` represents the metric collected, `tags` are the tag sets. - Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http/put.html) for more details. :::note + - In JSON protocol, strings will be converted to nchar type and numeric values will be converted to double type. - Only data in array format is accepted and so an array must be used even if there is only one row. @@ -74,13 +74,13 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http -The above sample code will created 2 STables automatically while each STable has 2 rows of data. +2 STables will be created automatically and each STable has 2 rows of data in the above sample code. ```cmd taos> use test; Database changed. -taos> show STables; +taos> show stables; name | created_time | columns | tags | tables | ============================================================================================ meters.current | 2022-03-29 16:05:25.193 | 2 | 2 | 1 | diff --git a/docs/en/07-develop/03-insert-data/05-high-volume.md b/docs/en/07-develop/03-insert-data/05-high-volume.md new file mode 100644 index 0000000000000000000000000000000000000000..9ea0c884473e670d0624cb3be737830f46bedc38 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/05-high-volume.md @@ -0,0 +1,441 @@ +--- +sidebar_label: High Performance Writing +title: High Performance Writing +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +This chapter introduces how to write data into TDengine with high throughput. + +## How to achieve high performance data writing + +To achieve high performance writing, there are a few aspects to consider. In the following sections we will describe these important factors in achieving high performance writing. + +### Application Program + +From the perspective of application program, you need to consider: + +1. The data size of each single write, also known as batch size. Generally speaking, higher batch size generates better writing performance. However, once the batch size is over a specific value, you will not get any additional benefit anymore. When using SQL to write into TDengine, it's better to put as much as possible data in single SQL. The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB. + +2. The number of concurrent connections. Normally more connections can get better result. However, once the number of connections exceeds the processing ability of the server side, the performance may downgrade. + +3. The distribution of data to be written across tables or sub-tables. Writing to single table in one batch is more efficient than writing to multiple tables in one batch. + +4. Data Writing Protocol. + - Prameter binding mode is more efficient than SQL because it doesn't have the cost of parsing SQL. + - Writing to known existing tables is more efficient than wirting to uncertain tables in automatic creating mode because the later needs to check whether the table exists or not before actually writing data into it + - Writing in SQL is more efficient than writing in schemaless mode because schemaless writing creats table automatically and may alter table schema + +Application programs need to take care of the above factors and try to take advantage of them. The application progam should write to single table in each write batch. The batch size needs to be tuned to a proper value on a specific system. The number of concurrent connections needs to be tuned to a proper value too to achieve the best writing throughput. + +### Data Source + +Application programs need to read data from data source then write into TDengine. If you meet one or more of below situations, you need to setup message queues between the threads for reading from data source and the threads for writing into TDengine. + +1. There are multiple data sources, the data generation speed of each data source is much slower than the speed of single writing thread. In this case, the purpose of message queues is to consolidate the data from multiple data sources together to increase the batch size of single write. +2. The speed of data generation from single data source is much higher than the speed of single writing thread. The purpose of message queue in this case is to provide buffer so that data is not lost and multiple writing threads can get data from the buffer. +3. The data for single table are from multiple data source. In this case the purpose of message queues is to combine the data for single table together to improve the write efficiency. + +If the data source is Kafka, then the appication program is a consumer of Kafka, you can benefit from some kafka features to achieve high performance writing: + +1. Put the data for a table in single partition of single topic so that it's easier to put the data for each table together and write in batch +2. Subscribe multiple topics to accumulate data together. +3. Add more consumers to gain more concurrency and throughput. +4. Incrase the size of single fetch to increase the size of write batch. + +### Tune TDengine + +On the server side, database configuration parameter `vgroups` needs to be set carefully to maximize the system performance. If it's set too low, the system capability can't be utilized fully; if it's set too big, unnecessary resource competition may be produced. A normal recommendation for `vgroups` parameter is 2 times of the number of CPU cores. However, depending on the actual system resources, it may still need to tuned. + +For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config)。 + +## Sample Programs + +This section will introduce the sample programs to demonstrate how to write into TDengine with high performance. + +### Scenario + +Below are the scenario for the sample programs of high performance wrting. + +- Application program reads data from data source, the sample program simulates a data source by generating data +- The speed of single writing thread is much slower than the speed of generating data, so the program starts multiple writing threads while each thread establish a connection to TDengine and each thread has a message queue of fixed size. +- Application program maps the received data to different writing threads based on table name to make sure all the data for each table is always processed by a specific writing thread. +- Each writing thread writes the received data into TDengine once the message queue becomes empty or the read data meets a threshold. + +![Thread Model of High Performance Writing into TDengine](highvolume.webp) + +### Sample Programs + +The sample programs listed in this section are based on the scenario described previously. If your scenarios is different, please try to adjust the code based on the principles described in this chapter. + +The sample programs assume the source data is for all the different sub tables in same super table (meters). The super table has been created before the sample program starts to writing data. Sub tables are created automatically according to received data. If there are multiple super tables in your case, please try to adjust the part of creating table automatically. + + + + +**Program Inventory** + +| Class | Description | +| ---------------- | ----------------------------------------------------------------------------------------------------- | +| FastWriteExample | Main Program | +| ReadTask | Read data from simulated data source and put into a queue according to the hash value of table name | +| WriteTask | Read data from Queue, compose a wirte batch and write into TDengine | +| MockDataSource | Generate data for some sub tables of super table meters | +| SQLWriter | WriteTask uses this class to compose SQL, create table automatically, check SQL length and write data | +| StmtWriter | Write in Parameter binding mode (Not finished yet) | +| DataBaseMonitor | Calculate the writing speed and output on console every 10 seconds | + +Below is the list of complete code of the classes in above table and more detailed description. + +
+FastWriteExample +The main Program is responsible for: + +1. Create message queues +2. Start writing threads +3. Start reading threads +4. Otuput writing speed every 10 seconds + +The main program provides 4 parameters for tuning: + +1. The number of reading threads, default value is 1 +2. The number of writing threads, default alue is 2 +3. The total number of tables in the generated data, default value is 1000. These tables are distributed evenly across all writing threads. If the number of tables is very big, it will cost much time to firstly create these tables. +4. The batch size of single write, default value is 3,000 + +The capacity of message queue also impacts performance and can be tuned by modifying program. Normally it's always better to have a larger message queue. A larger message queue means lower possibility of being blocked when enqueueing and higher throughput. But a larger message queue consumes more memory space. The default value used in the sample programs is already big enoug. + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java}} +``` + +
+ +
+ReadTask + +ReadTask reads data from data source. Each ReadTask is associated with a simulated data source, each data source generates data for a group of specific tables, and the data of any table is only generated from a single specific data source. + +ReadTask puts data in message queue in blocking mode. That means, the putting operation is blocked if the message queue is full. + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java}} +``` + +
+ +
+WriteTask + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java}} +``` + +
+ +
+ +MockDataSource + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java}} +``` + +
+ +
+ +SQLWriter + +SQLWriter class encapsulates the logic of composing SQL and writing data. Please be noted that the tables have not been created before writing, but are created automatically when catching the exception of table doesn't exist. For other exceptions caught, the SQL which caused the exception are logged for you to debug. + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java}} +``` + +
+ +
+ +DataBaseMonitor + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java}} +``` + +
+ +**Steps to Launch** + +
+Launch Java Sample Program + +You need to set environment variable `TDENGINE_JDBC_URL` before launching the program. If TDengine Server is setup on localhost, then the default value for user name, password and port can be used, like below: + +``` +TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata" +``` + +**Launch in IDE** + +1. Clone TDengine repolitory + ``` + git clone git@github.com:taosdata/TDengine.git --depth 1 + ``` +2. Use IDE to open `docs/examples/java` directory +3. Configure environment variable `TDENGINE_JDBC_URL`, you can also configure it before launching the IDE, if so you can skip this step. +4. Run class `com.taos.example.highvolume.FastWriteExample` + +**Launch on server** + +If you want to launch the sample program on a remote server, please follow below steps: + +1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java` : + ``` + mvn package + ``` +2. Create `examples/java` directory on the server + ``` + mkdir -p examples/java + ``` +3. Copy dependencies (below commands assume you are working on a local Windows host and try to launch on a remote Linux host) + - Copy dependent packages + ``` + scp -r .\target\lib @:~/examples/java + ``` + - Copy the jar of sample programs + ``` + scp -r .\target\javaexample-1.0.jar @:~/examples/java + ``` +4. Configure environment variable + Edit `~/.bash_profile` or `~/.bashrc` and add below: + + ``` + export TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata" + ``` + + If your TDengine server is not deployed on localhost or doesn't use default port, you need to change the above URL to correct value in your environment. + +5. Launch the sample program + + ``` + java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample + ``` + +6. The sample program doesn't exit unless you press CTRL + C to terminate it. + Below is the output of running on a server of 16 cores, 64GB memory and SSD hard disk. + + ``` + root@vm85$ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample 2 12 + 18:56:35.896 [main] INFO c.t.e.highvolume.FastWriteExample - readTaskCount=2, writeTaskCount=12 tableCount=1000 maxBatchSize=3000 + 18:56:36.011 [WriteThread-0] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.015 [WriteThread-0] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.021 [WriteThread-1] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.022 [WriteThread-1] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.031 [WriteThread-2] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.032 [WriteThread-2] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.041 [WriteThread-3] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.042 [WriteThread-3] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.093 [WriteThread-4] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.094 [WriteThread-4] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.099 [WriteThread-5] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.100 [WriteThread-5] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.100 [WriteThread-6] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.101 [WriteThread-6] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.103 [WriteThread-7] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.104 [WriteThread-7] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.105 [WriteThread-8] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.107 [WriteThread-8] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.108 [WriteThread-9] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.109 [WriteThread-9] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.156 [WriteThread-10] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.157 [WriteThread-11] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.158 [WriteThread-10] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.158 [ReadThread-0] INFO com.taos.example.highvolume.ReadTask - started + 18:56:36.158 [ReadThread-1] INFO com.taos.example.highvolume.ReadTask - started + 18:56:36.158 [WriteThread-11] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:46.369 [main] INFO c.t.e.highvolume.FastWriteExample - count=18554448 speed=1855444 + 18:56:56.946 [main] INFO c.t.e.highvolume.FastWriteExample - count=39059660 speed=2050521 + 18:57:07.322 [main] INFO c.t.e.highvolume.FastWriteExample - count=59403604 speed=2034394 + 18:57:18.032 [main] INFO c.t.e.highvolume.FastWriteExample - count=80262938 speed=2085933 + 18:57:28.432 [main] INFO c.t.e.highvolume.FastWriteExample - count=101139906 speed=2087696 + 18:57:38.921 [main] INFO c.t.e.highvolume.FastWriteExample - count=121807202 speed=2066729 + 18:57:49.375 [main] INFO c.t.e.highvolume.FastWriteExample - count=142952417 speed=2114521 + 18:58:00.689 [main] INFO c.t.e.highvolume.FastWriteExample - count=163650306 speed=2069788 + 18:58:11.646 [main] INFO c.t.e.highvolume.FastWriteExample - count=185019808 speed=2136950 + ``` + +
+ +
+ + +**Program Inventory** + +Sample programs in Python uses multi-process and cross-process message queues. + +| Function/CLass | Description | +| ---------------------------- | --------------------------------------------------------------------------- | +| main Function | Program entry point, create child processes and message queues | +| run_monitor_process Function | Create database, super table, calculate writing speed and output to console | +| run_read_task Function | Read data and distribute to message queues | +| MockDataSource Class | Simulate data source, return next 1,000 rows of each table | +| run_write_task Function | Read as much as possible data from message queue and write in batch | +| SQLWriter Class | Write in SQL and create table utomatically | +| StmtWriter Class | Write in parameter binding mode (not finished yet) | + +
+main function + +`main` function is responsible for creating message queues and fork child processes, there are 3 kinds of child processes: + +1. Monitoring process, initializes database and calculating writing speed +2. Reading process (n), reads data from data source +3. Writing process (m), wirtes data into TDengine + +`main` function provides 5 parameters: + +1. The number of reading tasks, default value is 1 +2. The number of writing tasks, default value is 1 +3. The number of tables, default value is 1,000 +4. The capacity of message queue, default value is 1,000,000 bytes +5. The batch size in single write, default value is 3000 + +```python +{{#include docs/examples/python/fast_write_example.py:main}} +``` + +
+ +
+run_monitor_process + +Monitoring process initilizes database and monitoring writing speed. + +```python +{{#include docs/examples/python/fast_write_example.py:monitor}} +``` + +
+ +
+ +run_read_task function + +Reading process reads data from other data system and distributes to the message queue allocated for it. + +```python +{{#include docs/examples/python/fast_write_example.py:read}} +``` + +
+ +
+ +MockDataSource + +Below is the simulated data source, we assume table name exists in each generated data. + +```python +{{#include docs/examples/python/mockdatasource.py}} +``` + +
+ +
+run_write_task function + +Writing process tries to read as much as possible data from message queue and writes in batch. + +```python +{{#include docs/examples/python/fast_write_example.py:write}} +``` + +
+ +
+ +SQLWriter class encapsulates the logic of composing SQL and writing data. Please be noted that the tables have not been created before writing, but are created automatically when catching the exception of table doesn't exist. For other exceptions caught, the SQL which caused the exception are logged for you to debug. This class also checks the SQL length, and passes the maximum SQL length by parameter maxSQLLength according to actual TDengine limit. + +SQLWriter + +```python +{{#include docs/examples/python/sql_writer.py}} +``` + +
+ +**Steps to Launch** + +
+ +Launch Sample Program in Python + +1. Prerequisities + + - TDengine client driver has been installed + - Python3 has been installed, the the version >= 3.8 + - TDengine Python connector `taospy` has been installed + +2. Install faster-fifo to replace python builtin multiprocessing.Queue + + ``` + pip3 install faster-fifo + ``` + +3. Click the "Copy" in the above sample programs to copy `fast_write_example.py` 、 `sql_writer.py` and `mockdatasource.py`. + +4. Execute the program + + ``` + python3 fast_write_example.py + ``` + + Below is the output of running on a server of 16 cores, 64GB memory and SSD hard disk. + + ``` + root@vm85$ python3 fast_write_example.py 8 8 + 2022-07-14 19:13:45,869 [root] - READ_TASK_COUNT=8, WRITE_TASK_COUNT=8, TABLE_COUNT=1000, QUEUE_SIZE=1000000, MAX_BATCH_SIZE=3000 + 2022-07-14 19:13:48,882 [root] - WriteTask-0 started with pid 718347 + 2022-07-14 19:13:48,883 [root] - WriteTask-1 started with pid 718348 + 2022-07-14 19:13:48,884 [root] - WriteTask-2 started with pid 718349 + 2022-07-14 19:13:48,884 [root] - WriteTask-3 started with pid 718350 + 2022-07-14 19:13:48,885 [root] - WriteTask-4 started with pid 718351 + 2022-07-14 19:13:48,885 [root] - WriteTask-5 started with pid 718352 + 2022-07-14 19:13:48,886 [root] - WriteTask-6 started with pid 718353 + 2022-07-14 19:13:48,886 [root] - WriteTask-7 started with pid 718354 + 2022-07-14 19:13:48,887 [root] - ReadTask-0 started with pid 718355 + 2022-07-14 19:13:48,888 [root] - ReadTask-1 started with pid 718356 + 2022-07-14 19:13:48,889 [root] - ReadTask-2 started with pid 718357 + 2022-07-14 19:13:48,889 [root] - ReadTask-3 started with pid 718358 + 2022-07-14 19:13:48,890 [root] - ReadTask-4 started with pid 718359 + 2022-07-14 19:13:48,891 [root] - ReadTask-5 started with pid 718361 + 2022-07-14 19:13:48,892 [root] - ReadTask-6 started with pid 718364 + 2022-07-14 19:13:48,893 [root] - ReadTask-7 started with pid 718365 + 2022-07-14 19:13:56,042 [DataBaseMonitor] - count=6676310 speed=667631.0 + 2022-07-14 19:14:06,196 [DataBaseMonitor] - count=20004310 speed=1332800.0 + 2022-07-14 19:14:16,366 [DataBaseMonitor] - count=32290310 speed=1228600.0 + 2022-07-14 19:14:26,527 [DataBaseMonitor] - count=44438310 speed=1214800.0 + 2022-07-14 19:14:36,673 [DataBaseMonitor] - count=56608310 speed=1217000.0 + 2022-07-14 19:14:46,834 [DataBaseMonitor] - count=68757310 speed=1214900.0 + 2022-07-14 19:14:57,280 [DataBaseMonitor] - count=80992310 speed=1223500.0 + 2022-07-14 19:15:07,689 [DataBaseMonitor] - count=93805310 speed=1281300.0 + 2022-07-14 19:15:18,020 [DataBaseMonitor] - count=106111310 speed=1230600.0 + 2022-07-14 19:15:28,356 [DataBaseMonitor] - count=118394310 speed=1228300.0 + 2022-07-14 19:15:38,690 [DataBaseMonitor] - count=130742310 speed=1234800.0 + 2022-07-14 19:15:49,000 [DataBaseMonitor] - count=143051310 speed=1230900.0 + 2022-07-14 19:15:59,323 [DataBaseMonitor] - count=155276310 speed=1222500.0 + 2022-07-14 19:16:09,649 [DataBaseMonitor] - count=167603310 speed=1232700.0 + 2022-07-14 19:16:19,995 [DataBaseMonitor] - count=179976310 speed=1237300.0 + ``` + +
+ +:::note +Don't establish connection to TDengine in the parent process if using Python connector in multi-process way, otherwise all the connections in child processes are blocked always. This is a known issue. + +::: + +
+
diff --git a/docs/en/07-develop/03-insert-data/_go_stmt.mdx b/docs/en/07-develop/03-insert-data/_go_stmt.mdx index ab519c9a806345c2f14337f62c74728da955d2e0..0bdcffc16fa18a1d0afcd50548ed6076f6154838 100644 --- a/docs/en/07-develop/03-insert-data/_go_stmt.mdx +++ b/docs/en/07-develop/03-insert-data/_go_stmt.mdx @@ -3,6 +3,6 @@ ``` :::tip -`github.com/taosdata/driver-go/v2/wrapper` module in driver-go is the wrapper for C API, it can be used to insert data with parameter binding. +`github.com/taosdata/driver-go/v3/wrapper` module in driver-go is the wrapper for C API, it can be used to insert data with parameter binding. ::: diff --git a/docs/en/07-develop/03-insert-data/_php_sql.mdx b/docs/en/07-develop/03-insert-data/_php_sql.mdx new file mode 100644 index 0000000000000000000000000000000000000000..78cd663ec219dabc2eeb81c7e67426eda41d7762 --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_php_sql.mdx @@ -0,0 +1,3 @@ +```php +{{#include docs/examples/php/insert.php}} +``` diff --git a/docs/en/07-develop/03-insert-data/_php_stmt.mdx b/docs/en/07-develop/03-insert-data/_php_stmt.mdx new file mode 100644 index 0000000000000000000000000000000000000000..3bb7b2f8da9887c1063822e69bfdff599aa50b7b --- /dev/null +++ b/docs/en/07-develop/03-insert-data/_php_stmt.mdx @@ -0,0 +1,3 @@ +```php +{{#include docs/examples/php/insert_stmt.php}} +``` diff --git a/docs/en/07-develop/03-insert-data/highvolume.webp b/docs/en/07-develop/03-insert-data/highvolume.webp new file mode 100644 index 0000000000000000000000000000000000000000..46dfc74ae3b0043c591ff930c62251da49cae7ad Binary files /dev/null and b/docs/en/07-develop/03-insert-data/highvolume.webp differ diff --git a/docs/en/07-develop/04-query-data/_php.mdx b/docs/en/07-develop/04-query-data/_php.mdx new file mode 100644 index 0000000000000000000000000000000000000000..bcafd1cfbcb1bbb55b03f6fe198e6fa1b5251b19 --- /dev/null +++ b/docs/en/07-develop/04-query-data/_php.mdx @@ -0,0 +1,3 @@ +```go +{{#include docs/examples/php/query.php}} +``` diff --git a/docs/en/07-develop/04-query-data/index.mdx b/docs/en/07-develop/04-query-data/index.mdx index a212fa9529215fc24c55c95a166cfc1a407359b2..38dc98d1ff262c7f8ec4951297e6f42e436682c8 100644 --- a/docs/en/07-develop/04-query-data/index.mdx +++ b/docs/en/07-develop/04-query-data/index.mdx @@ -1,6 +1,5 @@ --- -Sidebar_label: Query data -title: Query data +title: Query Data description: "This chapter introduces major query functionalities and how to perform sync and async query using connectors." --- @@ -13,6 +12,7 @@ import RustQuery from "./_rust.mdx"; import NodeQuery from "./_js.mdx"; import CsQuery from "./_cs.mdx"; import CQuery from "./_c.mdx"; +import PhpQuery from "./_php.mdx"; import PyAsync from "./_py_async.mdx"; import NodeAsync from "./_js_async.mdx"; import CsAsync from "./_cs_async.mdx"; @@ -24,9 +24,8 @@ SQL is used by TDengine as its query language. Application programs can send SQL - Query on single column or multiple columns - Filter on tags or data columns:>, <, =, <\>, like -- Grouping of results: `Group By` -- Sorting of results: `Order By` -- Limit the number of results: `Limit/Offset` +- Grouping of results: `Group By` - Sorting of results: `Order By` - Limit the number of results: `Limit/Offset` +- Windowed aggregate queries for time windows (interval), session windows (session), and state windows (state_window) - Arithmetic on columns of numeric types or aggregate results - Join query with timestamp alignment - Aggregate functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff @@ -34,10 +33,6 @@ SQL is used by TDengine as its query language. Application programs can send SQL For example, the SQL statement below can be executed in TDengine CLI `taos` to select records with voltage greater than 215 and limit the output to only 2 rows. ```sql -select * from d1001 where voltage > 215 order by ts desc limit 2; -``` - -```title=Output taos> select * from d1001 where voltage > 215 order by ts desc limit 2; ts | current | voltage | phase | ====================================================================================== @@ -46,89 +41,88 @@ taos> select * from d1001 where voltage > 215 order by ts desc limit 2; Query OK, 2 row(s) in set (0.001100s) ``` -To meet the requirements of varied use cases, some special functions have been added in TDengine. Some examples are `twa` (Time Weighted Average), `spread` (The difference between the maximum and the minimum), and `last_row` (the last row). Furthermore, continuous query is also supported in TDengine. +To meet the requirements of varied use cases, some special functions have been added in TDengine. Some examples are `twa` (Time Weighted Average), `spread` (The difference between the maximum and the minimum), and `last_row` (the last row). -For detailed query syntax please refer to [Select](/taos-sql/select). +For detailed query syntax, see [Select](../../taos-sql/select). ## Aggregation among Tables In most use cases, there are always multiple kinds of data collection points. A new concept, called STable (abbreviation for super table), is used in TDengine to represent one type of data collection point, and a subtable is used to represent a specific data collection point of that type. Tags are used by TDengine to represent the static properties of data collection points. A specific data collection point has its own values for static properties. By specifying filter conditions on tags, aggregation can be performed efficiently among all the subtables created via the same STable, i.e. same type of data collection points. Aggregate functions applicable for tables can be used directly on STables; the syntax is exactly the same. -In summary, records across subtables can be aggregated by a simple query on their STable. It is like a join operation. However, tables belonging to different STables can not be aggregated. - ### Example 1 In TDengine CLI `taos`, use the SQL below to get the average voltage of all the meters in California grouped by location. ``` -taos> SELECT AVG(voltage) FROM meters GROUP BY location; - avg(voltage) | location | -============================================================= - 222.000000000 | California.LosAngeles | - 219.200000000 | California.SanFrancisco | -Query OK, 2 row(s) in set (0.002136s) +taos> SELECT AVG(voltage), location FROM meters GROUP BY location; + avg(voltage) | location | +=============================================================================================== + 219.200000000 | California.SanFrancisco | + 221.666666667 | California.LosAngeles | +Query OK, 2 rows in database (0.005995s) ``` ### Example 2 -In TDengine CLI `taos`, use the SQL below to get the number of rows and the maximum current in the past 24 hours from meters whose groupId is 2. +In TDengine CLI `taos`, use the SQL below to get the number of rows and the maximum current from meters whose groupId is 2. ``` -taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h; +taos> SELECT count(*), max(current) FROM meters where groupId = 2; count(*) | max(current) | ================================== 5 | 13.4 | Query OK, 1 row(s) in set (0.002136s) ``` -Join queries are only allowed between subtables of the same STable. In [Select](/taos-sql/select), all query operations are marked as to whether they support STables or not. +In [Select](../../taos-sql/select), all query operations are marked as to whether they support STables or not. ## Down Sampling and Interpolation In IoT use cases, down sampling is widely used to aggregate data by time range. The `INTERVAL` keyword in TDengine can be used to simplify the query by time window. For example, the SQL statement below can be used to get the sum of current every 10 seconds from meters table d1001. ``` -taos> SELECT sum(current) FROM d1001 INTERVAL(10s); - ts | sum(current) | +taos> SELECT _wstart, sum(current) FROM d1001 INTERVAL(10s); + _wstart | sum(current) | ====================================================== 2018-10-03 14:38:00.000 | 10.300000191 | 2018-10-03 14:38:10.000 | 24.900000572 | -Query OK, 2 row(s) in set (0.000883s) +Query OK, 2 rows in database (0.003139s) ``` Down sampling can also be used for STable. For example, the below SQL statement can be used to get the sum of current from all meters in California. ``` -taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s); - ts | sum(current) | +taos> SELECT _wstart, SUM(current) FROM meters where location like "California%" INTERVAL(1s); + _wstart | sum(current) | ====================================================== 2018-10-03 14:38:04.000 | 10.199999809 | - 2018-10-03 14:38:05.000 | 32.900000572 | + 2018-10-03 14:38:05.000 | 23.699999809 | 2018-10-03 14:38:06.000 | 11.500000000 | 2018-10-03 14:38:15.000 | 12.600000381 | - 2018-10-03 14:38:16.000 | 36.000000000 | -Query OK, 5 row(s) in set (0.001538s) + 2018-10-03 14:38:16.000 | 34.400000572 | +Query OK, 5 rows in database (0.007413s) ``` Down sampling also supports time offset. For example, the below SQL statement can be used to get the sum of current from all meters but each time window must start at the boundary of 500 milliseconds. ``` -taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a); - ts | sum(current) | +taos> SELECT _wstart, SUM(current) FROM meters INTERVAL(1s, 500a); + _wstart | sum(current) | ====================================================== - 2018-10-03 14:38:04.500 | 11.189999809 | - 2018-10-03 14:38:05.500 | 31.900000572 | - 2018-10-03 14:38:06.500 | 11.600000000 | - 2018-10-03 14:38:15.500 | 12.300000381 | - 2018-10-03 14:38:16.500 | 35.000000000 | -Query OK, 5 row(s) in set (0.001521s) + 2018-10-03 14:38:03.500 | 10.199999809 | + 2018-10-03 14:38:04.500 | 10.300000191 | + 2018-10-03 14:38:05.500 | 13.399999619 | + 2018-10-03 14:38:06.500 | 11.500000000 | + 2018-10-03 14:38:14.500 | 12.600000381 | + 2018-10-03 14:38:16.500 | 34.400000572 | +Query OK, 6 rows in database (0.005515s) ``` In many use cases, it's hard to align the timestamp of the data collected by each collection point. However, a lot of algorithms like FFT require the data to be aligned with same time interval and application programs have to handle this by themselves. In TDengine, it's easy to achieve the alignment using down sampling. Interpolation can be performed in TDengine if there is no data in a time range. -For more details please refer to [Aggregate by Window](/taos-sql/interval). +For more information, see [Aggregate by Window](../../taos-sql/distinguished). ## Examples @@ -158,6 +152,9 @@ In the section describing [Insert](/develop/insert-data/sql-writing), a database + + + :::note diff --git a/docs/en/07-develop/06-continuous-query.mdx b/docs/en/07-develop/06-continuous-query.mdx deleted file mode 100644 index 1aea5783fc8116a4e02a4b5345d341707cd399ea..0000000000000000000000000000000000000000 --- a/docs/en/07-develop/06-continuous-query.mdx +++ /dev/null @@ -1,83 +0,0 @@ ---- -sidebar_label: Continuous Query -description: "Continuous query is a query that's executed automatically at a predefined frequency to provide aggregate query capability by time window. It is essentially simplified, time driven, stream computing." -title: "Continuous Query" ---- - -A continuous query is a query that's executed automatically at a predefined frequency to provide aggregate query capability by time window. It is essentially simplified, time driven, stream computing. A continuous query can be performed on a table or STable in TDengine. The results of a continuous query can be pushed to clients or written back to TDengine. Each query is executed on a time window, which moves forward with time. The size of time window and the forward sliding time need to be specified with parameter `INTERVAL` and `SLIDING` respectively. - -A continuous query in TDengine is time driven, and can be defined using TAOS SQL directly without any extra operations. With a continuous query, the result can be generated based on a time window to achieve down sampling of the original data. Once a continuous query is defined using TAOS SQL, the query is automatically executed at the end of each time window and the result is pushed back to clients or written to TDengine. - -There are some differences between continuous query in TDengine and time window computation in stream computing: - -- The computation is performed and the result is returned in real time in stream computing, but the computation in continuous query is only started when a time window closes. For example, if the time window is 1 day, then the result will only be generated at 23:59:59. -- If a historical data row is written in to a time window for which the computation has already finished, the computation will not be performed again and the result will not be pushed to client applications again. If the results have already been written into TDengine, they will not be updated. -- In continuous query, if the result is pushed to a client, the client status is not cached on the server side and Exactly-once is not guaranteed by the server. If the client program crashes, a new time window will be generated from the time where the continuous query is restarted. If the result is written into TDengine, the data written into TDengine can be guaranteed as valid and continuous. - -## Syntax - -```sql -[CREATE TABLE AS] SELECT select_expr [, select_expr ...] - FROM {tb_name_list} - [WHERE where_condition] - [INTERVAL(interval_val [, interval_offset]) [SLIDING sliding_val]] - -``` - -INTERVAL: The time window for which continuous query is performed - -SLIDING: The time step for which the time window moves forward each time - -## How to Use - -In this section the use case of meters will be used to introduce how to use continuous query. Assume the STable and subtables have been created using the SQL statements below. - -```sql -create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int); -create table D1001 using meters tags ("California.SanFrancisco", 2); -create table D1002 using meters tags ("California.LosAngeles", 2); -``` - -The SQL statement below retrieves the average voltage for a one minute time window, with each time window moving forward by 30 seconds. - -```sql -select avg(voltage) from meters interval(1m) sliding(30s); -``` - -Whenever the above SQL statement is executed, all the existing data will be computed again. If the computation needs to be performed every 30 seconds automatically to compute on the data in the past one minute, the above SQL statement needs to be revised as below, in which `{startTime}` stands for the beginning timestamp in the latest time window. - -```sql -select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s); -``` - -An easier way to achieve this is to prepend `create table {tableName} as` before the `select`. - -```sql -create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s); -``` - -A table named as `avg_vol` will be created automatically, then every 30 seconds the `select` statement will be executed automatically on the data in the past 1 minute, i.e. the latest time window, and the result is written into table `avg_vol`. The client program just needs to query from table `avg_vol`. For example: - -```sql -taos> select * from avg_vol; - ts | avg_voltage_ | -=================================================== - 2020-07-29 13:37:30.000 | 222.0000000 | - 2020-07-29 13:38:00.000 | 221.3500000 | - 2020-07-29 13:38:30.000 | 220.1700000 | - 2020-07-29 13:39:00.000 | 223.0800000 | -``` - -Please note that the minimum allowed time window is 10 milliseconds, and there is no upper limit. - -It's possible to specify the start and end time of a continuous query. If the start time is not specified, the timestamp of the first row will be considered as the start time; if the end time is not specified, the continuous query will be performed indefinitely, otherwise it will be terminated once the end time is reached. For example, the continuous query in the SQL statement below will be started from now and terminated one hour later. - -```sql -create table avg_vol as select avg(voltage) from meters where ts > now and ts <= now + 1h interval(1m) sliding(30s); -``` - -`now` in the above SQL statement stands for the time when the continuous query is created, not the time when the computation is actually performed. To avoid the trouble caused by a delay in receiving data as much as possible, the actual computation in a continuous query is started after a little delay. That means, once a time window closes, the computation is not started immediately. Normally, the result are available after a little time, normally within one minute, after the time window closes. - -## How to Manage - -`show streams` command can be used in the TDengine CLI `taos` to show all the continuous queries in the system, and `kill stream` can be used to terminate a continuous query. diff --git a/docs/en/07-develop/06-stream.md b/docs/en/07-develop/06-stream.md new file mode 100644 index 0000000000000000000000000000000000000000..36f903ee9a4f2d210e63d0b79e702bc199f790ed --- /dev/null +++ b/docs/en/07-develop/06-stream.md @@ -0,0 +1,113 @@ +--- +sidebar_label: Stream Processing +description: "The TDengine stream processing engine combines data inserts, preprocessing, analytics, real-time computation, and alerting into a single component." +title: Stream Processing +--- + +Raw time-series data is often cleaned and preprocessed before being permanently stored in a database. In a traditional time-series solution, this generally requires the deployment of stream processing systems such as Kafka or Flink. However, the complexity of such systems increases the cost of development and maintenance. + +With the stream processing engine built into TDengine, you can process incoming data streams in real time and define stream transformations in SQL. Incoming data is automatically processed, and the results are pushed to specified tables based on triggering rules that you define. This is a lightweight alternative to complex processing engines that returns computation results in milliseconds even in high throughput scenarios. + +The stream processing engine includes data filtering, scalar function computation (including user-defined functions), and window aggregation, with support for sliding windows, session windows, and event windows. Stream processing can write data to supertables from other supertables, standard tables, or subtables. When you create a stream, the target supertable is automatically created. New data is then processed and written to that supertable according to the rules defined for the stream. You can use PARTITION BY statements to partition the data by table name or tag. Separate partitions are then written to different subtables within the target supertable. + +TDengine stream processing supports the aggregation of supertables that are deployed across multiple vnodes. It can also handle out-of-order writes and includes a watermark mechanism that determines the extent to which out-of-order data is accepted by the system. You can configure whether to drop or reprocess out-of-order data through the **ignore expired** parameter. + +For more information, see [Stream Processing](../../taos-sql/stream). + + +## Create a Stream + +```sql +CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name AS subquery +stream_options: { + TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time] + WATERMARK time + IGNORE EXPIRED [0 | 1] +} +``` + +For more information, see [Stream Processing](../../taos-sql/stream). + +## Usage Scenario 1 + +It is common that smart electrical meter systems for businesses generate millions of data points that are widely dispersed and not ordered. The time required to clean and convert this data makes efficient, real-time processing impossible for traditional solutions. This scenario shows how you can configure TDengine stream processing to drop data points over 220 V, find the maximum voltage for 5 second windows, and output this data to a table. + +### Create a Database for Raw Data + +A database including one supertable and four subtables is created as follows: + +```sql +DROP DATABASE IF EXISTS power; +CREATE DATABASE power; +USE power; + +CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int); + +CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2); +CREATE TABLE d1002 USING meters TAGS ("California.SanFrancisco", 3); +CREATE TABLE d1003 USING meters TAGS ("California.LosAngeles", 2); +CREATE TABLE d1004 USING meters TAGS ("California.LosAngeles", 3); +``` + +### Create a Stream + +```sql +create stream current_stream into current_stream_output_stb as select _wstart as start, _wend as end, max(current) as max_current from meters where voltage <= 220 interval (5s); +``` + +### Write Data +```sql +insert into d1001 values("2018-10-03 14:38:05.000", 10.30000, 219, 0.31000); +insert into d1001 values("2018-10-03 14:38:15.000", 12.60000, 218, 0.33000); +insert into d1001 values("2018-10-03 14:38:16.800", 12.30000, 221, 0.31000); +insert into d1002 values("2018-10-03 14:38:16.650", 10.30000, 218, 0.25000); +insert into d1003 values("2018-10-03 14:38:05.500", 11.80000, 221, 0.28000); +insert into d1003 values("2018-10-03 14:38:16.600", 13.40000, 223, 0.29000); +insert into d1004 values("2018-10-03 14:38:05.000", 10.80000, 223, 0.29000); +insert into d1004 values("2018-10-03 14:38:06.500", 11.50000, 221, 0.35000); +``` + +### Query the Results + +```sql +taos> select start, end, max_current from current_stream_output_stb; + start | end | max_current | +=========================================================================== + 2018-10-03 14:38:05.000 | 2018-10-03 14:38:10.000 | 10.30000 | + 2018-10-03 14:38:15.000 | 2018-10-03 14:38:20.000 | 12.60000 | +Query OK, 2 rows in database (0.018762s) +``` + +## Usage Scenario 2 + +In this scenario, the active power and reactive power are determined from the data gathered in the previous scenario. The location and name of each meter are concatenated with a period (.) between them, and the data set is partitioned by meter name and written to a new database. + +### Create a Database for Raw Data + +The procedure from the previous scenario is used to create the database. + +### Create a Stream + +```sql +create stream power_stream into power_stream_output_stb as select ts, concat_ws(".", location, tbname) as meter_location, current*voltage*cos(phase) as active_power, current*voltage*sin(phase) as reactive_power from meters partition by tbname; +``` + +### Write data + +The procedure from the previous scenario is used to write the data. + +### Query the Results +```sql +taos> select ts, meter_location, active_power, reactive_power from power_stream_output_stb; + ts | meter_location | active_power | reactive_power | +=================================================================================================================== + 2018-10-03 14:38:05.000 | California.LosAngeles.d1004 | 2307.834596289 | 688.687331847 | + 2018-10-03 14:38:06.500 | California.LosAngeles.d1004 | 2387.415754896 | 871.474763418 | + 2018-10-03 14:38:05.500 | California.LosAngeles.d1003 | 2506.240411679 | 720.680274962 | + 2018-10-03 14:38:16.600 | California.LosAngeles.d1003 | 2863.424274422 | 854.482390839 | + 2018-10-03 14:38:05.000 | California.SanFrancisco.d1001 | 2148.178871730 | 688.120784090 | + 2018-10-03 14:38:15.000 | California.SanFrancisco.d1001 | 2598.589176205 | 890.081451418 | + 2018-10-03 14:38:16.800 | California.SanFrancisco.d1001 | 2588.728381186 | 829.240910475 | + 2018-10-03 14:38:16.650 | California.SanFrancisco.d1002 | 2175.595991997 | 555.520860397 | +Query OK, 8 rows in database (0.014753s) +``` diff --git a/docs/en/07-develop/07-subscribe.mdx b/docs/en/07-develop/07-subscribe.mdx deleted file mode 100644 index 782fcdbaf221419dd231bd10958e26b8f4f856e5..0000000000000000000000000000000000000000 --- a/docs/en/07-develop/07-subscribe.mdx +++ /dev/null @@ -1,259 +0,0 @@ ---- -sidebar_label: Data Subscription -description: "Lightweight service for data subscription and publishing. Time series data inserted into TDengine continuously can be pushed automatically to subscribing clients." -title: Data Subscription ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; -import Java from "./_sub_java.mdx"; -import Python from "./_sub_python.mdx"; -import Go from "./_sub_go.mdx"; -import Rust from "./_sub_rust.mdx"; -import Node from "./_sub_node.mdx"; -import CSharp from "./_sub_cs.mdx"; -import CDemo from "./_sub_c.mdx"; - -## Introduction - -Due to the nature of time series data, data insertion into TDengine is similar to data publishing in message queues. Data is stored in ascending order of timestamp inside TDengine, and so each table in TDengine can essentially be considered as a message queue. - -A lightweight service for data subscription and publishing is built into TDengine. With the API provided by TDengine, client programs can use `select` statements to subscribe to data from one or more tables. The subscription and state maintenance is performed on the client side. The client programs poll the server to check whether there is new data, and if so the new data will be pushed back to the client side. If the client program is restarted, where to start retrieving new data is up to the client side. - -There are 3 major APIs related to subscription provided in the TDengine client driver. - -```c -taos_subscribe -taos_consume -taos_unsubscribe -``` - -For more details about these APIs please refer to [C/C++ Connector](/reference/connector/cpp). Their usage will be introduced below using the use case of meters, in which the schema of STable and subtables from the previous section [Continuous Query](/develop/continuous-query) are used. Full sample code can be found [here](https://github.com/taosdata/TDengine/blob/master/examples/c/subscribe.c). - -If we want to get a notification and take some actions if the current exceeds a threshold, like 10A, from some meters, there are two ways: - -The first way is to query each sub table and record the last timestamp matching the criteria. Then after some time, query the data later than the recorded timestamp, and repeat this process. The SQL statements for this way are as below. - -```sql -select * from D1001 where ts > {last_timestamp1} and current > 10; -select * from D1002 where ts > {last_timestamp2} and current > 10; -... -``` - -The above way works, but the problem is that the number of `select` statements increases with the number of meters. Additionally, the performance of both client side and server side will be unacceptable once the number of meters grows to a big enough number. - -A better way is to query on the STable, only one `select` is enough regardless of the number of meters, like below: - -```sql -select * from meters where ts > {last_timestamp} and current > 10; -``` - -However, this presents a new problem in how to choose `last_timestamp`. First, the timestamp when the data is generated is different from the timestamp when the data is inserted into the database, sometimes the difference between them may be very big. Second, the time when the data from different meters arrives at the database may be different too. If the timestamp of the "slowest" meter is used as `last_timestamp` in the query, the data from other meters may be selected repeatedly; but if the timestamp of the "fastest" meter is used as `last_timestamp`, some data from other meters may be missed. - -All the problems mentioned above can be resolved easily using the subscription functionality provided by TDengine. - -The first step is to create subscription using `taos_subscribe`. - -```c -TAOS_SUB* tsub = NULL; -if (async) { -  // create an asynchronous subscription, the callback function will be called every 1s -  tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000); -} else { -  // create an synchronous subscription, need to call 'taos_consume' manually -  tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0); -} -``` - -The subscription in TDengine can be either synchronous or asynchronous. In the above sample code, the value of variable `async` is determined from the CLI input, then it's used to create either an async or sync subscription. Sync subscription means the client program needs to invoke `taos_consume` to retrieve data, and async subscription means another thread created by `taos_subscribe` internally invokes `taos_consume` to retrieve data and pass the data to `subscribe_callback` for processing. `subscribe_callback` is a callback function provided by the client program. You should not perform time consuming operations in the callback function. - -The parameter `taos` is an established connection. Nothing special needs to be done for thread safety for synchronous subscription. For asynchronous subscription, the taos_subscribe function should be called exclusively by the current thread, to avoid unpredictable errors. - -The parameter `sql` is a `select` statement in which the `where` clause can be used to specify filter conditions. In our example, we can subscribe to the records in which the current exceeds 10A, with the following SQL statement: - -```sql -select * from meters where current > 10; -``` - -Please note that, all the data will be processed because no start time is specified. If we only want to process data for the past day, a time related condition can be added: - -```sql -select * from meters where ts > now - 1d and current > 10; -``` - -The parameter `topic` is the name of the subscription. The client application must guarantee that the name is unique. However, it doesn't have to be globally unique because subscription is implemented in the APIs on the client side. - -If the subscription named as `topic` doesn't exist, the parameter `restart` will be ignored. If the subscription named as `topic` has been created before by the client program, when the client program is restarted with the subscription named `topic`, parameter `restart` is used to determine whether to retrieve data from the beginning or from the last point where the subscription was broken. - -If the value of `restart` is **true** (i.e. a non-zero value), data will be retrieved from the beginning. If it is **false** (i.e. zero), the data already consumed before will not be processed again. - -The last parameter of `taos_subscribe` is the polling interval in units of millisecond. In sync mode, if the time difference between two continuous invocations to `taos_consume` is smaller than the interval specified by `taos_subscribe`, `taos_consume` will be blocked until the interval is reached. In async mode, this interval is the minimum interval between two invocations to the call back function. - -The second to last parameter of `taos_subscribe` is used to pass arguments to the call back function. `taos_subscribe` doesn't process this parameter and simply passes it to the call back function. This parameter is simply ignored in sync mode. - -After a subscription is created, its data can be consumed and processed. Shown below is the sample code to consume data in sync mode, in the else condition of `if (async)`. - -```c -if (async) { -  getchar(); -} else while(1) { -  TAOS_RES* res = taos_consume(tsub); -  if (res == NULL) { -    printf("failed to consume data."); -    break; -  } else { -    print_result(res, blockFetch); -    getchar(); -  } -} -``` - -In the above sample code in the else condition, there is an infinite loop. Each time carriage return is entered `taos_consume` is invoked. The return value of `taos_consume` is the selected result set. In the above sample, `print_result` is used to simplify the printing of the result set. It is similar to `taos_use_result`. Below is the implementation of `print_result`. - -```c -void print_result(TAOS_RES* res, int blockFetch) { -  TAOS_ROW row = NULL; -  int num_fields = taos_num_fields(res); -  TAOS_FIELD* fields = taos_fetch_fields(res); -  int nRows = 0; -  if (blockFetch) { -    nRows = taos_fetch_block(res, &row); -    for (int i = 0; i < nRows; i++) { -      char temp[256]; -      taos_print_row(temp, row + i, fields, num_fields); -      puts(temp); -    } -  } else { -    while ((row = taos_fetch_row(res))) { -      char temp[256]; -      taos_print_row(temp, row, fields, num_fields); -      puts(temp); -      nRows++; -    } -  } -  printf("%d rows consumed.\n", nRows); -} -``` - -In the above code `taos_print_row` is used to process the data consumed. All matching rows are printed. - -In async mode, consuming data is simpler as shown below. - -```c -void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { -  print_result(res, *(int*)param); -} -``` - -`taos_unsubscribe` can be invoked to terminate a subscription. - -```c -taos_unsubscribe(tsub, keep); -``` - -The second parameter `keep` is used to specify whether to keep the subscription progress on the client sde. If it is **false**, i.e. **0**, then subscription will be restarted from beginning regardless of the `restart` parameter's value when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_ , under which there is a file with the same name as `topic` for each subscription(Note: The default value of `DataDir` in the `taos.cfg` file is **/var/lib/taos/**. However, **/var/lib/taos/** does not exist on the Windows server. So you need to change the `DataDir` value to the corresponding existing directory."), the subscription will be restarted from the beginning if the corresponding progress file is removed. - -Now let's see the effect of the above sample code, assuming below prerequisites have been done. - -- The sample code has been downloaded to local system -- TDengine has been installed and launched properly on same system -- The database, STable, and subtables required in the sample code are ready - -Launch the command below in the directory where the sample code resides to compile and start the program. - -```bash -make -./subscribe -sql='select * from meters where current > 10;' -``` - -After the program is started, open another terminal and launch TDengine CLI `taos`, then use the below SQL commands to insert a row whose current is 12A into table **D1001**. - -```sql -use test; -insert into D1001 values(now, 12, 220, 1); -``` - -Then, this row of data will be shown by the example program on the first terminal because its current exceeds 10A. More data can be inserted for you to observe the output of the example program. - -## Examples - -The example program below demonstrates how to subscribe, using connectors, to data rows in which current exceeds 10A. - -### Prepare Data - -```bash -# create database "power" -taos> create database power; -# use "power" as the database in following operations -taos> use power; -# create super table "meters" -taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int); -# create tabes using the schema defined by super table "meters" -taos> create table d1001 using meters tags ("California.SanFrancisco", 2); -taos> create table d1002 using meters tags ("California.LoSangeles", 2); -# insert some rows -taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1); -taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1); -# filter out the rows in which current is bigger than 10A -taos> select * from meters where current > 10; - ts | current | voltage | phase | location | groupid | -=========================================================================================================== - 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | California.LoSangeles | 2 | - 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | California.LoSangeles | 2 | - 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | California.SanFrancisco | 2 | - 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | California.SanFrancisco | 2 | - 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | California.SanFrancisco | 2 | -Query OK, 5 row(s) in set (0.004896s) -``` - -### Example Programs - - - - - - - - - {/* - - */} - - - - {/* - - - - - */} - - - - - -### Run the Examples - -The example programs first consume all historical data matching the criteria. - -```bash -ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2 -ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: California.SanFrancisco groupid : 2 -ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2 -ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: California.LoSangeles groupid : 2 -ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: California.LoSangeles groupid : 2 -``` - -Next, use TDengine CLI to insert a new row. - -``` -# taos -taos> use power; -taos> insert into d1001 values(now, 12.4, 220, 1); -``` - -Because the current in the inserted row exceeds 10A, it will be consumed by the example program. - -``` -ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: California.SanFrancisco groupid: 2 -``` diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx new file mode 100644 index 0000000000000000000000000000000000000000..17b3f5caa062eaacb4216b7153e899040e702cc1 --- /dev/null +++ b/docs/en/07-develop/07-tmq.mdx @@ -0,0 +1,841 @@ +--- +sidebar_label: Data Subscription +description: "The TDengine data subscription service automatically pushes data written in TDengine to subscribing clients." +title: Data Subscription +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; +import Java from "./_sub_java.mdx"; +import Python from "./_sub_python.mdx"; +import Go from "./_sub_go.mdx"; +import Rust from "./_sub_rust.mdx"; +import Node from "./_sub_node.mdx"; +import CSharp from "./_sub_cs.mdx"; +import CDemo from "./_sub_c.mdx"; + +TDengine provides data subscription and consumption interfaces similar to message queue products. These interfaces make it easier for applications to obtain data written to TDengine either in real time and to process data in the order that events occurred. This simplifies your time-series data processing systems and reduces your costs because it is no longer necessary to deploy a message queue product such as Kafka. + +To use TDengine data subscription, you define topics like in Kafka. However, a topic in TDengine is based on query conditions for an existing supertable, table, or subtable - in other words, a SELECT statement. You can use SQL to filter data by tag, table name, column, or expression and then perform a scalar function or user-defined function on the data. Aggregate functions are not supported. This gives TDengine data subscription more flexibility than similar products. The granularity of data can be controlled on demand by applications, while filtering and preprocessing are handled by TDengine instead of the application layer. This implementation reduces the amount of data transmitted and the complexity of applications. + +By subscribing to a topic, a consumer can obtain the latest data in that topic in real time. Multiple consumers can be formed into a consumer group that consumes messages together. Consumer groups enable faster speed through multi-threaded, distributed data consumption. Note that consumers in different groups that are subscribed to the same topic do not consume messages together. A single consumer can subscribe to multiple topics. If the data in a supertable is sharded across multiple vnodes, consumer groups can consume it much more efficiently than single consumers. TDengine also includes an acknowledgement mechanism that ensures at-least-once delivery in complicated environments where machines may crash or restart. + +To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers. + + + +## Data Schema and API + +The related schemas and APIs in various languages are described as follows: + + + + +```c +typedef struct tmq_t tmq_t; +typedef struct tmq_conf_t tmq_conf_t; +typedef struct tmq_list_t tmq_list_t; + +typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); + +DLL_EXPORT tmq_list_t *tmq_list_new(); +DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); +DLL_EXPORT void tmq_list_destroy(tmq_list_t *); +DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); +DLL_EXPORT const char *tmq_err2str(int32_t code); + +DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); +DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); +DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); +DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); +DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); +DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + +enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, +}; +typedef enum tmq_conf_res_t tmq_conf_res_t; + +DLL_EXPORT tmq_conf_t *tmq_conf_new(); +DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); +DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); +DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); +``` + +For more information, see [C/C++ Connector](/reference/connector/cpp). + +The following example is based on the smart meter table described in Data Models. For complete sample code, see the C language section below. + + + + +```java +void subscribe(Collection topics) throws SQLException; + +void unsubscribe() throws SQLException; + +Set subscription() throws SQLException; + +ConsumerRecords poll(Duration timeout) throws SQLException; + +void commitAsync(); + +void commitAsync(OffsetCommitCallback callback); + +void commitSync() throws SQLException; + +void close() throws SQLException; +``` + + + + + +```python +class TaosConsumer(): + def __init__(self, *topics, **configs) + + def __iter__(self) + + def __next__(self) + + def sync_next(self) + + def subscription(self) + + def unsubscribe(self) + + def close(self) + + def __del__(self) +``` + + + + + +```go +func NewConsumer(conf *Config) (*Consumer, error) + +func (c *Consumer) Close() error + +func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error + +func (c *Consumer) FreeMessage(message unsafe.Pointer) + +func (c *Consumer) Poll(timeout time.Duration) (*Result, error) + +func (c *Consumer) Subscribe(topics []string) error + +func (c *Consumer) Unsubscribe() error +``` + + + + + +```rust +impl TBuilder for TmqBuilder + fn from_dsn(dsn: D) -> Result + fn build(&self) -> Result + +impl AsAsyncConsumer for Consumer + async fn subscribe, I: IntoIterator + Send>( + &mut self, + topics: I, + ) -> Result<(), Self::Error>; + fn stream( + &self, + ) -> Pin< + Box< + dyn '_ + + Send + + futures::Stream< + Item = Result<(Self::Offset, MessageSet), Self::Error>, + >, + >, + >; + async fn commit(&self, offset: Self::Offset) -> Result<(), Self::Error>; + + async fn unsubscribe(self); +``` + +For more information, see [Crate taos](https://docs.rs/taos). + + + + + +```js +function TMQConsumer(config) + +function subscribe(topic) + +function consume(timeout) + +function subscription() + +function unsubscribe() + +function commit(msg) + +function close() +``` + + + + + +```csharp +ConsumerBuilder(IEnumerable> config) + +virtual IConsumer Build() + +Consumer(ConsumerBuilder builder) + +void Subscribe(IEnumerable topics) + +void Subscribe(string topic) + +ConsumeResult Consume(int millisecondsTimeout) + +List Subscription() + +void Unsubscribe() + +void Commit(ConsumeResult consumerResult) + +void Close() +``` + + + + +## Insert Data into TDengine + +A database including one supertable and two subtables is created as follows: + +```sql +DROP DATABASE IF EXISTS tmqdb; +CREATE DATABASE tmqdb; +CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16) TAGS(t1 INT, t3 VARCHAR(16)); +CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0"); +CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1"); +INSERT INTO tmqdb.ctb0 VALUES(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00'); +INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11'); +``` + +## Create a Topic + +The following SQL statement creates a topic in TDengine: + +```sql +CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1; +``` + +Multiple subscription types are supported. + +#### Subscribe to a Column + +Syntax: + +```sql +CREATE TOPIC topic_name as subquery +``` + +You can subscribe to a topic through a SELECT statement. Statements that specify columns, such as `SELECT *` and `SELECT ts, cl` are supported, as are filtering conditions and scalar functions. Aggregate functions and time window aggregation are not supported. Note: + +- The schema of topics created in this manner is determined by the subscribed data. +- You cannot modify (`ALTER
MODIFY`) or delete (`ALTER
DROP`) columns or tags that are used in a subscription or calculation. +- Columns added to a table after the subscription is created are not displayed in the results. Deleting columns will cause an error. + +### Subscribe to a Supertable + +Syntax: + +```sql +CREATE TOPIC topic_name AS STABLE stb_name +``` + +Creating a topic in this manner differs from a `SELECT * from stbName` statement as follows: + +- The table schema can be modified. +- Unstructured data is returned. The format of the data returned changes based on the supertable schema. +- A different table schema may exist for every data block to be processed. +- The data returned does not include tags. + +### Subscribe to a Database + +Syntax: + +```sql +CREATE TOPIC topic_name [WITH META] AS DATABASE db_name; +``` + +This SQL statement creates a subscription to all tables in the database. You can add the `WITH META` parameter to include schema changes in the subscription, including creating and deleting supertables; adding, deleting, and modifying columns; and creating, deleting, and modifying the tags of subtables. Consumers can determine the message type from the API. Note that this differs from Kafka. + +## Create a Consumer + +You configure the following parameters when creating a consumer: + +| Parameter | Type | Description | Remarks | +| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- | +| `td.connect.ip` | string | Used in establishing a connection; same as `taos_connect` | | +| `td.connect.user` | string | Used in establishing a connection; same as `taos_connect` | | +| `td.connect.pass` | string | Used in establishing a connection; same as `taos_connect` | | +| `td.connect.port` | string | Used in establishing a connection; same as `taos_connect` | | +| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. | +| `client.id` | string | Client ID | Maximum length: 192. | +| `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) | +| `enable.auto.commit` | boolean | Commit automatically | Specify `true` or `false`. | +| `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds | +| `enable.heartbeat.background` | boolean | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | | +| `experimental.snapshot.enable` | boolean | Specify whether to consume messages from the WAL or from TSBS | | +| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages | + +The method of specifying these parameters depends on the language used: + + + + +```c +/* Create consumer groups on demand (group.id) and enable automatic commits (enable.auto.commit), + an automatic commit interval (auto.commit.interval.ms), and a username (td.connect.user) and password (td.connect.pass) */ +tmq_conf_t* conf = tmq_conf_new(); +tmq_conf_set(conf, "enable.auto.commit", "true"); +tmq_conf_set(conf, "auto.commit.interval.ms", "1000"); +tmq_conf_set(conf, "group.id", "cgrpName"); +tmq_conf_set(conf, "td.connect.user", "root"); +tmq_conf_set(conf, "td.connect.pass", "taosdata"); +tmq_conf_set(conf, "auto.offset.reset", "earliest"); +tmq_conf_set(conf, "experimental.snapshot.enable", "true"); +tmq_conf_set(conf, "msg.with.table.name", "true"); +tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); + +tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); +tmq_conf_destroy(conf); +``` + + + + +Java programs use the following parameters: + +| Parameter | Type | Description | Remarks | +| ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- | +| `bootstrap.servers` | string |Connection address, such as `localhost:6030` | +| `value.deserializer` | string | Value deserializer; to use this method, implement the `com.taosdata.jdbc.tmq.Deserializer` interface or inherit the `com.taosdata.jdbc.tmq.ReferenceDeserializer` type | +| `value.deserializer.encoding` | string | Specify the encoding for string deserialization | | + +Note: The `bootstrap.servers` parameter is used instead of `td.connect.ip` and `td.connect.port` to provide an interface that is consistent with Kafka. + +```java +Properties properties = new Properties(); +properties.setProperty("enable.auto.commit", "true"); +properties.setProperty("auto.commit.interval.ms", "1000"); +properties.setProperty("group.id", "cgrpName"); +properties.setProperty("bootstrap.servers", "127.0.0.1:6030"); +properties.setProperty("td.connect.user", "root"); +properties.setProperty("td.connect.pass", "taosdata"); +properties.setProperty("auto.offset.reset", "earliest"); +properties.setProperty("msg.with.table.name", "true"); +properties.setProperty("value.deserializer", "com.taos.example.MetersDeserializer"); + +TaosConsumer consumer = new TaosConsumer<>(properties); + +/* value deserializer definition. */ +import com.taosdata.jdbc.tmq.ReferenceDeserializer; + +public class MetersDeserializer extends ReferenceDeserializer { +} +``` + + + + + +```go +config := tmq.NewConfig() +defer config.Destroy() +err = config.SetGroupID("test") +if err != nil { + panic(err) +} +err = config.SetAutoOffsetReset("earliest") +if err != nil { + panic(err) +} +err = config.SetConnectIP("127.0.0.1") +if err != nil { + panic(err) +} +err = config.SetConnectUser("root") +if err != nil { + panic(err) +} +err = config.SetConnectPass("taosdata") +if err != nil { + panic(err) +} +err = config.SetConnectPort("6030") +if err != nil { + panic(err) +} +err = config.SetMsgWithTableName(true) +if err != nil { + panic(err) +} +err = config.EnableHeartBeat() +if err != nil { + panic(err) +} +err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) { + if result.ErrCode != 0 { + errStr := wrapper.TMQErr2Str(result.ErrCode) + err := errors.NewError(int(result.ErrCode), errStr) + panic(err) + } +}) +if err != nil { + panic(err) +} +``` + + + + + +```rust +let mut dsn: Dsn = "taos://".parse()?; +dsn.set("group.id", "group1"); +dsn.set("client.id", "test"); +dsn.set("auto.offset.reset", "earliest"); + +let tmq = TmqBuilder::from_dsn(dsn)?; + +let mut consumer = tmq.build()?; +``` + + + + + +Python programs use the following parameters: + +| Parameter | Type | Description | Remarks | +| :----------------------------: | :----: | -------------------------------------------------------- | ------------------------------------------- | +| `td_connect_ip` | string | Used in establishing a connection; same as `taos_connect` | | +| `td_connect_user` | string | Used in establishing a connection; same as `taos_connect` | | +| `td_connect_pass` | string | Used in establishing a connection; same as `taos_connect` | | +| `td_connect_port` | string | Used in establishing a connection; same as `taos_connect` | | +| `group_id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. | +| `client_id` | string | Client ID | Maximum length: 192. | +| `auto_offset_reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) | +| `enable_auto_commit` | string | Commit automatically | Specify `true` or `false`. | +| `auto_commit_interval_ms` | string | Interval for automatic commits, in milliseconds | +| `enable_heartbeat_background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false`. | +| `experimental_snapshot_enable` | string | Specify whether to consume messages from the WAL or from TSBS | Specify `true` or `false`. | +| `msg_with_table_name` | string | Specify whether to deserialize table names from messages | Specify `true` or `false`. +| `timeout` | int | Consumer pull timeout | | + + + + + +```js +// Create consumer groups on demand (group.id) and enable automatic commits (enable.auto.commit), +// an automatic commit interval (auto.commit.interval.ms), and a username (td.connect.user) and password (td.connect.pass) + +let consumer = taos.consumer({ + 'enable.auto.commit': 'true', + 'auto.commit.interval.ms','1000', + 'group.id': 'tg2', + 'td.connect.user': 'root', + 'td.connect.pass': 'taosdata', + 'auto.offset.reset','earliest', + 'msg.with.table.name': 'true', + 'td.connect.ip','127.0.0.1', + 'td.connect.port','6030' + }); +``` + + + + + +```csharp +using TDengineTMQ; + +// Create consumer groups on demand (GourpID) and enable automatic commits (EnableAutoCommit), +// an automatic commit interval (AutoCommitIntervalMs), and a username (TDConnectUser) and password (TDConnectPasswd) +var cfg = new ConsumerConfig + { + EnableAutoCommit = "true" + AutoCommitIntervalMs = "1000" + GourpId = "TDengine-TMQ-C#", + TDConnectUser = "root", + TDConnectPasswd = "taosdata", + AutoOffsetReset = "earliest" + MsgWithTableName = "true", + TDConnectIp = "127.0.0.1", + TDConnectPort = "6030" + }; + +var consumer = new ConsumerBuilder(cfg).Build(); + +``` + + + + + +A consumer group is automatically created when multiple consumers are configured with the same consumer group ID. + +## Subscribe to a Topic + +A single consumer can subscribe to multiple topics. + + + + +```c +// Create a list of subscribed topics +tmq_list_t* topicList = tmq_list_new(); +tmq_list_append(topicList, "topicName"); +// Enable subscription +tmq_subscribe(tmq, topicList); +tmq_list_destroy(topicList); + +``` + + + + +```java +List topics = new ArrayList<>(); +topics.add("tmq_topic"); +consumer.subscribe(topics); +``` + + + + +```go +consumer, err := tmq.NewConsumer(config) +if err != nil { + panic(err) +} +err = consumer.Subscribe([]string{"example_tmq_topic"}) +if err != nil { + panic(err) +} +``` + + + + +```rust +consumer.subscribe(["tmq_meters"]).await?; +``` + + + + + +```python +consumer = TaosConsumer('topic_ctb_column', group_id='vg2') +``` + + + + + +```js +// Create a list of subscribed topics +let topics = ['topic_test'] + +// Enable subscription +consumer.subscribe(topics); +``` + + + + + +```csharp +// Create a list of subscribed topics +List topics = new List(); +topics.add("tmq_topic"); +// Enable subscription +consumer.Subscribe(topics); +``` + + + + + +## Consume messages + +The following code demonstrates how to consume the messages in a queue. + + + + +```c +## Consume data +while (running) { + TAOS_RES* msg = tmq_consumer_poll(tmq, timeOut); + msg_process(msg); +} +``` + +The `while` loop obtains a message each time it calls `tmq_consumer_poll()`. This message is exactly the same as the result returned by a query, and the same deserialization API can be used on it. + + + + +```java +while(running){ + ConsumerRecords meters = consumer.poll(Duration.ofMillis(100)); + for (Meters meter : meters) { + processMsg(meter); + } +} +``` + + + + + +```go +for { + result, err := consumer.Poll(time.Second) + if err != nil { + panic(err) + } + fmt.Println(result) + consumer.Commit(context.Background(), result.Message) + consumer.FreeMessage(result.Message) +} +``` + + + + + +```rust +{ + let mut stream = consumer.stream(); + + while let Some((offset, message)) = stream.try_next().await? { + // get information from offset + + // the topic + let topic = offset.topic(); + // the vgroup id, like partition id in kafka. + let vgroup_id = offset.vgroup_id(); + println!("* in vgroup id {vgroup_id} of topic {topic}\n"); + + if let Some(data) = message.into_data() { + while let Some(block) = data.fetch_raw_block().await? { + // one block for one table, get table name if needed + let name = block.table_name(); + let records: Vec = block.deserialize().try_collect()?; + println!( + "** table: {}, got {} records: {:#?}\n", + name.unwrap(), + records.len(), + records + ); + } + } + consumer.commit(offset).await?; + } +} +``` + + + + +```python +for msg in consumer: + for row in msg: + print(row) +``` + + + + + +```js +while(true){ + msg = consumer.consume(200); + // process message(consumeResult) + console.log(msg.topicPartition); + console.log(msg.block); + console.log(msg.fields) +} +``` + + + + + +```csharp +## Consume data +while (true) +{ + var consumerRes = consumer.Consume(100); + // process ConsumeResult + ProcessMsg(consumerRes); + consumer.Commit(consumerRes); +} +``` + + + + + +## Close the consumer + +After message consumption is finished, the consumer is unsubscribed. + + + + +```c +/* Unsubscribe */ +tmq_unsubscribe(tmq); + +/* Close consumer object */ +tmq_consumer_close(tmq); +``` + + + + +```java +/* Unsubscribe */ +consumer.unsubscribe(); + +/* Close consumer */ +consumer.close(); +``` + + + + + +```go +consumer.Close() +``` + + + + + +```rust +consumer.unsubscribe().await; +``` + + + + + +```py +# Unsubscribe +consumer.unsubscribe() +# Close consumer +consumer.close() +``` + + + + +```js +consumer.unsubscribe(); +consumer.close(); +``` + + + + + +```csharp +// Unsubscribe +consumer.Unsubscribe(); + +// Close consumer +consumer.Close(); +``` + + + + + +## Delete a Topic + +You can delete topics that are no longer useful. Note that you must unsubscribe all consumers from a topic before deleting it. + +```sql +/* Delete topic/ +DROP TOPIC topic_name; +``` + +## Check Status + +1. Query all existing topics. + +```sql +SHOW TOPICS; +``` + +2. Query the status and subscribed topics of all consumers. + +```sql +SHOW CONSUMERS; +``` + +3. Query the relationships between consumers and vgroups. + +```sql +SHOW SUBSCRIPTIONS; +``` + +## Examples + +The following section shows sample code in various languages. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/en/07-develop/08-cache.md b/docs/en/07-develop/08-cache.md index 8e86eff7414a02ad36a965eb092b8b9b65343301..82a4787016f608f8e32e89b1747443b7cd164551 100644 --- a/docs/en/07-develop/08-cache.md +++ b/docs/en/07-develop/08-cache.md @@ -1,52 +1,49 @@ --- -sidebar_label: Cache -title: Cache -description: "Caching System inside TDengine" +sidebar_label: Caching +title: Caching +description: "This document describes the caching component of TDengine." --- -To achieve the purpose of high performance data writing and querying, TDengine employs a lot of caching technologies in both server side and client side. +TDengine uses various kinds of caching techniques to efficiently write and query data. This document describes the caching component of TDengine. ## Write Cache -The cache management policy in TDengine is First-In-First-Out (FIFO). FIFO is also known as insert driven cache management policy and it is different from read driven cache management, which is more commonly known as Least-Recently-Used (LRU). FIFO simply stores the latest data in cache and flushes the oldest data in cache to disk, when the cache usage reaches a threshold. In IoT use cases, it is the current state i.e. the latest or most recent data that is important. The cache policy in TDengine, like much of the design and architecture of TDengine, is based on the nature of IoT data. +TDengine uses an insert-driven cache management policy, known as first in, first out (FIFO). This policy differs from read-driven "least recently used (LRU)" cache management. A FIFO policy stores the latest data in cache and flushes the oldest data from cache to disk when the cache usage reaches a threshold. In IoT use cases, the most recent data or the current state is most important. The cache policy in TDengine, like much of the design and architecture of TDengine, is based on the nature of IoT data. -The memory space used by each vnode as write cache is determined when creating a database. Parameter `vgroups` and `buffer` can be used to specify the number of vnode and the size of write cache for each vnode when creating the database. Then, the total size of write cache for this database is `vgroups * buffer`. +When you create a database, you can configure the size of the write cache on each vnode. The **vgroups** parameter determines the number of vgroups that process data in the database, and the **buffer** parameter determines the size of the write cache for each vnode. ```sql create database db0 vgroups 100 buffer 16MB ``` -The above statement creates a database of 100 vnodes while each vnode has a write cache of 16MB. - -Even though in theory it's always better to have a larger cache, the extra effect would be very minor once the size of cache grows beyond a threshold. So normally it's enough to use the default value of `buffer` parameter. +In theory, larger cache sizes are always better. However, at a certain point, it becomes impossible to improve performance by increasing cache size. In most scenarios, you can retain the default cache settings. ## Read Cache -When creating a database, it's also possible to specify whether to cache the latest data of each sub table, using parameter `cachelast`. There are 3 cases: -- 0: No cache for latest data -- 1: The last row of each table is cached, `last_row` function can benefit significantly from it -- 2: The latest non-NULL value of each column for each table is cached, `last` function can benefit very much when there is no `where`, `group by`, `order by` or `interval` clause -- 3: Bot hthe last row and the latest non-NULL value of each column for each table are cached, identical to the behavior of both 1 and 2 are set together - +When you create a database, you can configure whether the latest data from every subtable is cached. To do so, set the *cachemodel* parameter as follows: +- none: Caching is disabled. +- last_row: The latest row of data in each subtable is cached. This option significantly improves the performance of the `LAST_ROW` function +- last_value: The latest non-null value in each column of each subtable is cached. This option significantly improves the performance of the `LAST` function in normal situations, such as WHERE, ORDER BY, GROUP BY, and INTERVAL statements. +- both: Rows and columns are both cached. This option is equivalent to simultaneously enabling option last_row and last_value. -## Meta Cache +## Metadata Cache -To process data writing and querying efficiently, each vnode caches the metadata that's already retrieved. Parameters `pages` and `pagesize` are used to specify the size of metadata cache for each vnode. +To improve query and write performance, each vnode caches the metadata that it receives. When you create a database, you can configure the size of the metadata cache through the *pages* and *pagesize* parameters. ```sql create database db0 pages 128 pagesize 16kb ``` -The above statement will create a database db0 each of whose vnode is allocated a meta cache of `128 * 16 KB = 2 MB` . +The preceding SQL statement creates 128 pages on each vnode in the `db0` database. Each page has a 16 KB metadata cache. ## File System Cache -TDengine utilizes WAL to provide basic reliability. The essential of WAL is to append data in a disk file, so the file system cache also plays an important role in the writing performance. Parameter `wal` can be used to specify the policy of writing WAL, there are 2 cases: -- 1: Write data to WAL without calling fsync, the data is actually written to the file system cache without flushing immediately, in this way you can get better write performance -- 2: Write data to WAL and invoke fsync, the data is immediately flushed to disk, in this way you can get higher reliability +TDengine implements data reliability by means of a write-ahead log (WAL). Writing data to the WAL is essentially writing data to the disk in an ordered, append-only manner. For this reason, the file system cache plays an important role in write performance. When you create a database, you can use the *wal* parameter to choose higher performance or higher reliability. +- 1: This option writes to the WAL but does not enable fsync. New data written to the WAL is stored in the file system cache but not written to disk. This provides better performance. +- 2: This option writes to the WAL and enables fsync. New data written to the WAL is immediately written to disk. This provides better data reliability. ## Client Cache -To improve the overall efficiency of processing data, besides the above caches, the core library `libtaos.so` (also referred to as `taosc`) which all client programs depend on also has its own cache. `taosc` caches the metadata of the databases, super tables, tables that the invoking client has accessed, plus other critical metadata such as the cluster topology. +In addition to the server-side caching discussed previously, the core client library `libtaos.so` also makes use of caching. TDengine Client caches the metadata of all databases, supertables, and subtables that it has accessed, as well as the cluster topology. -When multiple client programs are accessing a TDengine cluster, if one of the clients modifies some metadata, the cache may become invalid in other clients. If this case happens, the client programs need to "reset query cache" to invalidate the whole cache so that `taosc` is enforced to repull the metadata it needs to rebuild the cache. +If a client modifies certain metadata while multiple clients are simultaneously accessing a TDengine cluster, the metadata caches on each client may fail or become out of sync. If this occurs, run the `reset query cache` command on the affected clientsto force them to obtain fresh metadata and reset their caches. diff --git a/docs/en/07-develop/09-udf.md b/docs/en/07-develop/09-udf.md index 49bc95bd91a4c31d42d2b21ef05d69225f1bd963..deb9c4cdb5b50edf7b48537f607ac47edc1246fd 100644 --- a/docs/en/07-develop/09-udf.md +++ b/docs/en/07-develop/09-udf.md @@ -1,240 +1,245 @@ --- sidebar_label: UDF -title: User Defined Functions(UDF) -description: "Scalar functions and aggregate functions developed by users can be utilized by the query framework to expand query capability" +title: User-Defined Functions (UDF) +description: "You can define your own scalar and aggregate functions to expand the query capabilities of TDengine." --- -In some use cases, built-in functions are not adequate for the query capability required by application programs. With UDF, the functions developed by users can be utilized by the query framework to meet business and application requirements. UDF normally takes one column of data as input, but can also support the result of a sub-query as input. +The built-in functions of TDengine may not be sufficient for the use cases of every application. In this case, you can define custom functions for use in TDengine queries. These are known as user-defined functions (UDF). A user-defined function takes one column of data or the result of a subquery as its input. -From version 2.2.0.0, UDF written in C/C++ are supported by TDengine. +TDengine supports user-defined functions written in C or C++. This document describes the usage of user-defined functions. +User-defined functions can be scalar functions or aggregate functions. Scalar functions, such as `abs`, `sin`, and `concat`, output a value for every row of data. Aggregate functions, such as `avg` and `max` output one value for multiple rows of data. -## Types of UDF +When you create a user-defined function, you must implement standard interface functions: +- For scalar functions, implement the `scalarfn` interface function. +- For aggregate functions, implement the `aggfn_start`, `aggfn`, and `aggfn_finish` interface functions. +- To initialize your function, implement the `udf_init` function. To terminate your function, implement the `udf_destroy` function. -Two kinds of functions can be implemented by UDF: scalar functions and aggregate functions. +There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be _start, _finish, _init, and _destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function. -Scalar functions return multiple rows and aggregate functions return either 0 or 1 row. - -In the case of a scalar function you only have to implement the "normal" function template. - -In the case of an aggregate function, in addition to the "normal" function, you also need to implement the "merge" and "finalize" function templates even if the implementation is empty. This will become clear in the sections below. - -### Scalar Function - -As mentioned earlier, a scalar UDF only has to implement the "normal" function template. The function template below can be used to define your own scalar function. - -`void udfNormalFunc(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput, int* numOfOutput, short otype, short obytes, SUdfInit* buf)` - -`udfNormalFunc` is the place holder for a function name. A function implemented based on the above template can be used to perform scalar computation on data rows. The parameters are fixed to control the data exchange between UDF and TDengine. - -- Definitions of the parameters: - - - data:input data - - itype:the type of input data, for details please refer to [type definition in column_meta](/reference/rest-api/), for example 4 represents INT - - iBytes:the number of bytes consumed by each value in the input data - - oType:the type of output data, similar to iType - - oBytes:the number of bytes consumed by each value in the output data - - numOfRows:the number of rows in the input data - - ts: the column of timestamp corresponding to the input data - - dataOutput:the buffer for output data, total size is `oBytes * numberOfRows` - - interBuf:the buffer for an intermediate result. Its size is specified by the `BUFSIZE` parameter when creating a UDF. It's normally used when the intermediate result is not same as the final result. This buffer is allocated and freed by TDengine. - - tsOutput:the column of timestamps corresponding to the output data; it can be used to output timestamp together with the output data if it's not NULL - - numOfOutput:the number of rows in output data - - buf:for the state exchange between UDF and TDengine +## Implementing a Scalar Function +The implementation of a scalar function is described as follows: +```c +#include "taos.h" +#include "taoserror.h" +#include "taosudf.h" + +// initialization function. if no initialization, we can skip definition of it. The initialization function shall be concatenation of the udf name and _init suffix +// @return error number defined in taoserror.h +int32_t scalarfn_init() { + // initialization. + return TSDB_CODE_SUCCESS; +} + +// scalar function main computation function +// @param inputDataBlock, input data block composed of multiple columns with each column defined by SUdfColumn +// @param resultColumn, output column +// @return error number defined in taoserror.h +int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn* resultColumn) { + // read data from inputDataBlock and process, then output to resultColumn. + return TSDB_CODE_SUCCESS; +} + +// cleanup function. if no cleanup related processing, we can skip definition of it. The destroy function shall be concatenation of the udf name and _destroy suffix. +// @return error number defined in taoserror.h +int32_t scalarfn_destroy() { + // clean up + return TSDB_CODE_SUCCESS; +} +``` +Replace `scalarfn` with the name of your function. - [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) is one example of a very simple UDF implementation, i.e. one instance of the above `udfNormalFunc` template. It adds one to each value of a passed in column, which can be filtered using the `where` clause, and outputs the result. +## Implementing an Aggregate Function -### Aggregate Function +The implementation of an aggregate function is described as follows: +```c +#include "taos.h" +#include "taoserror.h" +#include "taosudf.h" + +// Initialization function. if no initialization, we can skip definition of it. The initialization function shall be concatenation of the udf name and _init suffix +// @return error number defined in taoserror.h +int32_t aggfn_init() { + // initialization. + return TSDB_CODE_SUCCESS; +} + +// aggregate start function. The intermediate value or the state(@interBuf) is initialized in this function. The function name shall be concatenation of udf name and _start suffix +// @param interbuf intermediate value to intialize +// @return error number defined in taoserror.h +int32_t aggfn_start(SUdfInterBuf* interBuf) { + // initialize intermediate value in interBuf + return TSDB_CODE_SUCESS; +} + +// aggregate reduce function. This function aggregate old state(@interbuf) and one data bock(inputBlock) and output a new state(@newInterBuf). +// @param inputBlock input data block +// @param interBuf old state +// @param newInterBuf new state +// @return error number defined in taoserror.h +int32_t aggfn(SUdfDataBlock* inputBlock, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) { + // read from inputBlock and interBuf and output to newInterBuf + return TSDB_CODE_SUCCESS; +} + +// aggregate function finish function. This function transforms the intermediate value(@interBuf) into the final output(@result). The function name must be concatenation of aggfn and _finish suffix. +// @interBuf : intermediate value +// @result: final result +// @return error number defined in taoserror.h +int32_t int32_t aggfn_finish(SUdfInterBuf* interBuf, SUdfInterBuf *result) { + // read data from inputDataBlock and process, then output to result + return TSDB_CODE_SUCCESS; +} + +// cleanup function. if no cleanup related processing, we can skip definition of it. The destroy function shall be concatenation of the udf name and _destroy suffix. +// @return error number defined in taoserror.h +int32_t aggfn_destroy() { + // clean up + return TSDB_CODE_SUCCESS; +} +``` +Replace `aggfn` with the name of your function. -For aggregate UDF, as mentioned earlier you must implement a "normal" function template (described above) and also implement the "merge" and "finalize" templates. +## Interface Functions -#### Merge Function Template +There are strict naming conventions for interface functions. The names of the start, finish, init, and destroy interfaces must be _start, _finish, _init, and _destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function. -The function template below can be used to define your own merge function for an aggregate UDF. +Interface functions return a value that indicates whether the operation was successful. If an operation fails, the interface function returns an error code. Otherwise, it returns TSDB_CODE_SUCCESS. The error codes are defined in `taoserror.h` and in the common API error codes in `taos.h`. For example, TSDB_CODE_UDF_INVALID_INPUT indicates invalid input. TSDB_CODE_OUT_OF_MEMORY indicates insufficient memory. -`void udfMergeFunc(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)` +For information about the parameters for interface functions, see Data Model -`udfMergeFunc` is the place holder for a function name. The function implemented with the above template is used to aggregate intermediate results and can only be used in the aggregate query for STable. +### Interfaces for Scalar Functions -Definitions of the parameters: + `int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)` + + Replace `scalarfn` with the name of your function. This function performs scalar calculations on data blocks. You can configure a value through the parameters in the `resultColumn` structure. -- data:array of output data, if interBuf is used it's an array of interBuf -- numOfRows:number of rows in `data` -- dataOutput:the buffer for output data, the size is same as that of the final result; If the result is not final, it can be put in the interBuf, i.e. `data`. -- numOfOutput:number of rows in the output data -- buf:for the state exchange between UDF and TDengine +The parameters in the function are defined as follows: + - inputDataBlock: The data block to input. + - resultColumn: The column to output. The column to output. -#### Finalize Function Template +### Interfaces for Aggregate Functions -The function template below can be used to finalize the result of your own UDF, normally used when interBuf is used. +`int32_t aggfn_start(SUdfInterBuf *interBuf)` -`void udfFinalizeFunc(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)` +`int32_t aggfn(SUdfDataBlock* inputBlock, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf)` -`udfFinalizeFunc` is the place holder of function name, definitions of the parameter are as below: +`int32_t aggfn_finish(SUdfInterBuf* interBuf, SUdfInterBuf *result)` -- dataOutput:buffer for output data -- interBuf:buffer for intermediate result, can be used as input for next processing step -- numOfOutput:number of output data, can only be 0 or 1 for aggregate function -- buf:for state exchange between UDF and TDengine +Replace `aggfn` with the name of your function. In the function, aggfn_start is called to generate a result buffer. Data is then divided between multiple blocks, and aggfn is called on each block to update the result. Finally, aggfn_finish is called to generate final results from the intermediate results. The final result contains only one or zero data points. -### Example abs_max.c +The parameters in the function are defined as follows: + - interBuf: The intermediate result buffer. + - inputBlock: The data block to input. + - newInterBuf: The new intermediate result buffer. + - result: The final result. -[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) is an example of a user defined aggregate function to get the maximum from the absolute values of a column. -The internal processing happens as follows. The results of the select statement are divided into multiple row blocks and `udfNormalFunc`, i.e. `abs_max` in this case, is performed on each row block to generate the intermediate results for each sub table. Then `udfMergeFunc`, i.e. `abs_max_merge` in this case, is performed on the intermediate result of sub tables to aggregate and generate the final or intermediate result of STable. The intermediate result of STable is finally processed by `udfFinalizeFunc`, i.e. `abs_max_finalize` in this example, to generate the final result, which contains either 0 or 1 row. +### Initializing and Terminating User-Defined Functions +`int32_t udf_init()` -Other typical aggregation functions such as covariance, can also be implemented using aggregate UDF. +`int32_t udf_destroy()` -## UDF Naming Conventions +Replace `udf`with the name of your function. udf_init initializes the function. udf_destroy terminates the function. If it is not necessary to initialize your function, udf_init is not required. If it is not necessary to terminate your function, udf_destroy is not required. -The naming convention for the 3 kinds of function templates required by UDF is as follows: - - udfNormalFunc, udfMergeFunc, and udfFinalizeFunc are required to have same prefix, i.e. the actual name of udfNormalFunc. The udfNormalFunc doesn't need a suffix following the function name. - - udfMergeFunc should be udfNormalFunc followed by `_merge` - - udfFinalizeFunc should be udfNormalFunc followed by `_finalize`. - -The naming convention is part of TDengine's UDF framework. TDengine follows this convention to invoke the corresponding actual functions. -Depending on whether you are creating a scalar UDF or aggregate UDF, the functions that you need to implement are different. +## Data Structure of User-Defined Functions +```c +typedef struct SUdfColumnMeta { + int16_t type; + int32_t bytes; + uint8_t precision; + uint8_t scale; +} SUdfColumnMeta; + +typedef struct SUdfColumnData { + int32_t numOfRows; + int32_t rowsAlloc; + union { + struct { + int32_t nullBitmapLen; + char *nullBitmap; + int32_t dataLen; + char *data; + } fixLenCol; + + struct { + int32_t varOffsetsLen; + int32_t *varOffsets; + int32_t payloadLen; + char *payload; + int32_t payloadAllocLen; + } varLenCol; + }; +} SUdfColumnData; + +typedef struct SUdfColumn { + SUdfColumnMeta colMeta; + bool hasNull; + SUdfColumnData colData; +} SUdfColumn; + +typedef struct SUdfDataBlock { + int32_t numOfRows; + int32_t numOfCols; + SUdfColumn **udfCols; +} SUdfDataBlock; + +typedef struct SUdfInterBuf { + int32_t bufLen; + char* buf; + int8_t numOfResult; //zero or one +} SUdfInterBuf; +``` +The data structure is described as follows: -- Scalar function:udfNormalFunc is required. -- Aggregate function:udfNormalFunc, udfMergeFunc (if query on STable) and udfFinalizeFunc are required. +- The SUdfDataBlock block includes the number of rows (numOfRows) and number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn. +- SUdfColumn includes the definition of the data type of the column (colMeta) and the data in the column (colData). +- The member definitions of SUdfColumnMeta are the same as the data type definitions in `taos.h`. +- The data in SUdfColumnData can become longer. varLenCol indicates variable-length data, and fixLenCol indicates fixed-length data. +- SUdfInterBuf defines the intermediate structure `buffer` and the number of results in the buffer `numOfResult`. -For clarity, assuming we want to implement a UDF named "foo": -- If the function is a scalar function, we only need to implement the "normal" function template and it should be named simply `foo`. -- If the function is an aggregate function, we need to implement `foo`, `foo_merge`, and `foo_finalize`. Note that for aggregate UDF, even though one of the three functions is not necessary, there must be an empty implementation. +Additional functions are defined in `taosudf.h` to make it easier to work with these structures. ## Compile UDF -The source code of UDF in C can't be utilized by TDengine directly. UDF can only be loaded into TDengine after compiling to dynamically linked library (DLL). +To use your user-defined function in TDengine, first compile it to a dynamically linked library (DLL). -For example, the example UDF `add_one.c` mentioned earlier, can be compiled into DLL using the command below, in a Linux Shell. +For example, the sample UDF `add_one.c` can be compiled into a DLL as follows: ```bash gcc -g -O0 -fPIC -shared add_one.c -o add_one.so ``` -The generated DLL file `add_one.so` can be used later when creating a UDF. It's recommended to use GCC not older than 7.5. - -## Create and Use UDF - -When a UDF is created in a TDengine instance, it is available across the databases in that instance. - -### Create UDF - -SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDF are stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted. - -When creating UDF, the type of UDF, i.e. a scalar function or aggregate function must be specified. If the specified type is wrong, the SQL statements using the function would fail with errors. The input type and output type don't need to be the same in UDF, but the input data type and output data type must be consistent with the UDF definition. - -- Create Scalar Function - -```sql -CREATE FUNCTION userDefinedFunctionName AS "/absolute/path/to/userDefinedFunctionName.so" OUTPUTTYPE [BUFSIZE B]; -``` - -- userDefinedFunctionName:The function name to be used in SQL statement which must be consistent with the function name defined by `udfNormalFunc` and is also the name of the compiled DLL (.so file). -- path:The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes. -- outputtype:The output data type, the value is the literal string of the supported TDengine data type. -- B:the size of intermediate buffer, in bytes; it is an optional parameter and the range is [0,512]. - -For example, below SQL statement can be used to create a UDF from `add_one.so`. - -```sql -CREATE FUNCTION add_one AS "/home/taos/udf_example/add_one.so" OUTPUTTYPE INT; -``` - -- Create Aggregate Function - -```sql -CREATE AGGREGATE FUNCTION userDefinedFunctionName AS "/absolute/path/to/userDefinedFunctionName.so" OUTPUTTYPE [ BUFSIZE B ]; -``` - -- userDefinedFunctionName:the function name to be used in SQL statement which must be consistent with the function name defined by `udfNormalFunc` and is also the name of the compiled DLL (.so file). -- path:the absolute path of the DLL file including the name of the shared object file (.so). The path needs to be quoted by single or double quotes. -- OUTPUTTYPE:the output data type, the value is the literal string of the type -- B:the size of intermediate buffer, in bytes; it's an optional parameter and the range is [0,512] - -For details about how to use intermediate result, please refer to example program [demo.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c). - -For example, below SQL statement can be used to create a UDF from `demo.so`. - -```sql -CREATE AGGREGATE FUNCTION demo AS "/home/taos/udf_example/demo.so" OUTPUTTYPE DOUBLE bufsize 14; -``` - -### Manage UDF - -- Delete UDF - -``` -DROP FUNCTION ids(X); -``` - -- ids(X):same as that in `CREATE FUNCTION` statement - -```sql -DROP FUNCTION add_one; -``` - -- Show Available UDF +The generated DLL file `add_one.so` can now be used to implement your function. Note: GCC 7.5 or later is required. -```sql -SHOW FUNCTIONS; -``` - -### Use UDF - -The function name specified when creating UDF can be used directly in SQL statements, just like builtin functions. - -```sql -SELECT X(c) FROM table/STable; -``` - -The above SQL statement invokes function X for column c. - -## Restrictions for UDF - -In current version there are some restrictions for UDF +## Manage and Use User-Defined Functions +After compiling your function into a DLL, you add it to TDengine. For more information, see [User-Defined Functions](../12-taos-sql/26-udf.md). -1. Only Linux is supported when creating and invoking UDF for both client side and server side -2. UDF can't be mixed with builtin functions -3. Only one UDF can be used in a SQL statement -4. Only a single column is supported as input for UDF -5. Once created successfully, UDF is persisted in MNode of TDengineUDF -6. UDF can't be created through REST interface -7. The function name used when creating UDF in SQL must be consistent with the function name defined in the DLL, i.e. the name defined by `udfNormalFunc` -8. The name of a UDF should not conflict with any of TDengine's built-in functions +## Sample Code -## Examples +### Sample scalar function: [bit_and](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/bit_and.c) -### Scalar function example [add_one](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) +The bit_add function implements bitwise addition for multiple columns. If there is only one column, the column is returned. The bit_add function ignores null values.
-add_one.c +bit_and.c ```c -{{#include tests/script/sh/add_one.c}} +{{#include tests/script/sh/bit_and.c}} ```
-### Aggregate function example [abs_max](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) - -
-abs_max.c - -```c -{{#include tests/script/sh/abs_max.c}} -``` - -
+### Sample aggregate function: [l2norm](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/l2norm.c) -### Example for using intermediate result [demo](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c) +The l2norm function finds the second-order norm for all data in the input column. This squares the values, takes a cumulative sum, and finds the square root.
-demo.c +l2norm.c ```c -{{#include tests/script/sh/demo.c}} +{{#include tests/script/sh/l2norm.c}} ```
diff --git a/docs/en/07-develop/_sub_c.mdx b/docs/en/07-develop/_sub_c.mdx index da492a0269f064d8cdf9dfb80969894131d94015..b0667268e9978533e84e68ea3fe5f285538df762 100644 --- a/docs/en/07-develop/_sub_c.mdx +++ b/docs/en/07-develop/_sub_c.mdx @@ -1,3 +1,3 @@ ```c -{{#include docs/examples/c/subscribe_demo.c}} -``` \ No newline at end of file +{{#include docs/examples/c/tmq_example.c}} +``` diff --git a/docs/en/07-develop/_sub_java.mdx b/docs/en/07-develop/_sub_java.mdx index ab77f61348c115d3fe3336df47d467c5525f41b8..d14b5fd6095dd90f89dd2c2e828858585cfddff9 100644 --- a/docs/en/07-develop/_sub_java.mdx +++ b/docs/en/07-develop/_sub_java.mdx @@ -1,7 +1,11 @@ ```java {{#include docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java}} +{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}} +{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}} ``` -:::note -For now Java connector doesn't provide asynchronous subscription, but `TimerTask` can be used to achieve similar purpose. - -::: \ No newline at end of file +```java +{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}} +``` +```java +{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}} +``` \ No newline at end of file diff --git a/docs/en/07-develop/_sub_python.mdx b/docs/en/07-develop/_sub_python.mdx index 490b76fca6deb61e61dc59c2096b30742a7d25f7..1309da5b416799492a6b85aae4b775e227c0ad6e 100644 --- a/docs/en/07-develop/_sub_python.mdx +++ b/docs/en/07-develop/_sub_python.mdx @@ -1,3 +1,3 @@ ```py -{{#include docs/examples/python/subscribe_demo.py}} -``` \ No newline at end of file +{{#include docs/examples/python/tmq_example.py}} +``` diff --git a/docs/en/07-develop/_sub_rust.mdx b/docs/en/07-develop/_sub_rust.mdx index afb8d79daa3bbd72d72795cb4425f12277d710fc..0021666a7024a9b63d6b9c38bf8a57b6eded6d66 100644 --- a/docs/en/07-develop/_sub_rust.mdx +++ b/docs/en/07-develop/_sub_rust.mdx @@ -1,3 +1,3 @@ -```rs +```rust {{#include docs/examples/rust/nativeexample/examples/subscribe_demo.rs}} -``` \ No newline at end of file +``` diff --git a/docs/en/07-develop/index.md b/docs/en/07-develop/index.md index e3f55f290753f79ac1708337082ce90bb050b21f..1ef5e23f72f707f7a9decce6ea0bfed8fd642c0c 100644 --- a/docs/en/07-develop/index.md +++ b/docs/en/07-develop/index.md @@ -2,13 +2,12 @@ title: Developer Guide --- -To develop an application to process time-series data using TDengine, we recommend taking the following steps: - -1. Choose the method to connect to TDengine. No matter what programming language you use, you can always use the REST interface to access TDengine, but you can also use connectors unique to each programming language. -2. Design the data model based on your own use cases. Learn the [concepts](/concept/) of TDengine including "one table for one data collection point" and the "super table" (STable) concept; learn about static labels, collected metrics, and subtables. Depending on the characteristics of your data and your requirements, you may decide to create one or more databases, and you should design the STable schema to fit your data. +Before creating an application to process time-series data with TDengine, consider the following: +1. Choose the method to connect to TDengine. TDengine offers a REST API that can be used with any programming language. It also has connectors for a variety of languages. +2. Design the data model based on your own use cases. Consider the main [concepts](/concept/) of TDengine, including "one table per data collection point" and the supertable. Learn about static labels, collected metrics, and subtables. Depending on the characteristics of your data and your requirements, you decide to create one or more databases and design a supertable schema that fit your data. 3. Decide how you will insert data. TDengine supports writing using standard SQL, but also supports schemaless writing, so that data can be written directly without creating tables manually. 4. Based on business requirements, find out what SQL query statements need to be written. You may be able to repurpose any existing SQL. -5. If you want to run real-time analysis based on time series data, including various dashboards, it is recommended that you use the TDengine continuous query feature instead of deploying complex streaming processing systems such as Spark or Flink. +5. If you want to run real-time analysis based on time series data, including various dashboards, use the TDengine stream processing component instead of deploying complex systems such as Spark or Flink. 6. If your application has modules that need to consume inserted data, and they need to be notified when new data is inserted, it is recommended that you use the data subscription function provided by TDengine without the need to deploy Kafka. 7. In many use cases (such as fleet management), the application needs to obtain the latest status of each data collection point. It is recommended that you use the cache function of TDengine instead of deploying Redis separately. 8. If you find that the SQL functions of TDengine cannot meet your requirements, then you can use user-defined functions to solve the problem. diff --git a/docs/en/10-cluster/01-deploy.md b/docs/en/10-cluster/01-deploy.md deleted file mode 100644 index d998fd6ad0061ae2585e2155703bf5895ab6787d..0000000000000000000000000000000000000000 --- a/docs/en/10-cluster/01-deploy.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: Deployment ---- - -## Prerequisites - -### Step 1 - -The FQDN of all hosts must be setup properly. All FQDNs need to be configured in the /etc/hosts file on each host. You must confirm that each FQDN can be accessed from any other host, you can do this by using the `ping` command. - -The command `hostname -f` can be executed to get the hostname on any host. `ping ` command can be executed on each host to check whether any other host is accessible from it. If any host is not accessible, the network configuration, like /etc/hosts or DNS configuration, needs to be checked and revised, to make any two hosts accessible to each other. - -:::note - -- The host where the client program runs also needs to be configured properly for FQDN, to make sure all hosts for client or server can be accessed from any other. In other words, the hosts where the client is running are also considered as a part of the cluster. - -- Please ensure that your firewall rules do not block TCP/UDP on ports 6030-6042 on all hosts in the cluster. - -::: - -### Step 2 - -If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](/operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`. - -:::note - -As a best practice, before cleaning up any data files or directories, please ensure that your data has been backed up correctly, if required by your data integrity, backup, security, or other standard operating protocols (SOP). - -::: - -### Step 3 - -Now it's time to install TDengine on all hosts but without starting `taosd`. Note that the versions on all hosts should be same. If you are prompted to input the existing TDengine cluster, simply press carriage return to ignore the prompt. `install.sh -e no` can also be used to disable this prompt. For details please refer to [Install and Uninstall](/operation/pkg-install). - -### Step 4 - -Now each physical node (referred to, hereinafter, as `dnode` which is an abbreviation for "data node") of TDengine needs to be configured properly. Please note that one dnode doesn't stand for one host. Multiple TDengine dnodes can be started on a single host as long as they are configured properly without conflicting. More specifically each instance of the configuration file `taos.cfg` stands for a dnode. Assuming the first dnode of TDengine cluster is "h1.taosdata.com:6030", its `taos.cfg` is configured as following. - -```c -// firstEp is the end point to connect to when any dnode starts -firstEp h1.taosdata.com:6030 - -// must be configured to the FQDN of the host where the dnode is launched -fqdn h1.taosdata.com - -// the port used by the dnode, default is 6030 -serverPort 6030 - -// only necessary when replica is configured to an even number -#arbitrator ha.taosdata.com:6042 -``` - -`firstEp` and `fqdn` must be configured properly. In `taos.cfg` of all dnodes in TDengine cluster, `firstEp` must be configured to point to same address, i.e. the first dnode of the cluster. `fqdn` and `serverPort` compose the address of each node itself. If you want to start multiple TDengine dnodes on a single host, please make sure all other configurations like `dataDir`, `logDir`, and other resources related parameters are not conflicting. - -For all the dnodes in a TDengine cluster, the below parameters must be configured exactly the same, any node whose configuration is different from dnodes already in the cluster can't join the cluster. - -| **#** | **Parameter** | **Definition** | -| ----- | -------------- | ------------------------------------------------------------- | -| 1 | statusInterval | The time interval for which dnode reports its status to mnode | -| 2 | timezone | Time Zone where the server is located | -| 3 | locale | Location code of the system | -| 4 | charset | Character set of the system | - -## Start Cluster - -In the following example we assume that first dnode has FQDN h1.taosdata.com and the second dnode has FQDN h2.taosdata.com. - -### Start The First DNODE - -Start the first dnode following the instructions in [Get Started](/get-started/). Then launch TDengine CLI `taos` and execute command `show dnodes`, the output is as following for example: - -``` -Welcome to the TDengine shell from Linux, Client Version:3.0.0.0 -Copyright (c) 2022 by TAOS Data, Inc. All rights reserved. - -Server is Enterprise trial Edition, ver:3.0.0.0 and will never expire. - -taos> show dnodes; - id | endpoint | vnodes | support_vnodes | status | create_time | note | -============================================================================================================================================ - 1 | h1.taosdata.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | | -Query OK, 1 rows affected (0.007984s) - -taos> -``` - -From the above output, it is shown that the end point of the started dnode is "h1.taosdata.com:6030", which is the `firstEp` of the cluster. - -### Start Other DNODEs - -There are a few steps necessary to add other dnodes in the cluster. - -Let's assume we are starting the second dnode with FQDN, h2.taosdata.com. Firstly we make sure the configuration is correct. - -```c -// firstEp is the end point to connect to when any dnode starts -firstEp h1.taosdata.com:6030 - -// must be configured to the FQDN of the host where the dnode is launched -fqdn h2.taosdata.com - -// the port used by the dnode, default is 6030 -serverPort 6030 - -``` - -Secondly, we can start `taosd` as instructed in [Get Started](/get-started/). - -Then, on the first dnode i.e. h1.taosdata.com in our example, use TDengine CLI `taos` to execute the following command to add the end point of the dnode in the cluster. In the command "fqdn:port" should be quoted using double quotes. - -```sql -CREATE DNODE "h2.taos.com:6030"; -``` - -Then on the first dnode h1.taosdata.com, execute `show dnodes` in `taos` to show whether the second dnode has been added in the cluster successfully or not. - -```sql -SHOW DNODES; -``` - -If the status of the newly added dnode is offline, please check: - -- Whether the `taosd` process is running properly or not -- In the log file `taosdlog.0` to see whether the fqdn and port are correct - -The above process can be repeated to add more dnodes in the cluster. diff --git a/docs/en/10-cluster/02-cluster-mgmt.md b/docs/en/10-cluster/02-cluster-mgmt.md deleted file mode 100644 index 19ee034127364469931391257073ecbd6bc5507b..0000000000000000000000000000000000000000 --- a/docs/en/10-cluster/02-cluster-mgmt.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -sidebar_label: Operation -title: Manage DNODEs ---- - -The previous section, [Deployment],(/cluster/deploy) showed you how to deploy and start a cluster from scratch. Once a cluster is ready, the status of dnode(s) in the cluster can be shown at any time. Dnodes can be managed from the TDengine CLI. New dnode(s) can be added to scale out the cluster, an existing dnode can be removed and you can even perform load balancing manually, if necessary. - -:::note -All the commands introduced in this chapter must be run in the TDengine CLI - `taos`. Note that sometimes it is necessary to use root privilege. - -::: - -## Show DNODEs - -The below command can be executed in TDengine CLI `taos` to list all dnodes in the cluster, including ID, end point (fqdn:port), status (ready, offline), number of vnodes, number of free vnodes and so on. We recommend executing this command after adding or removing a dnode. - -```sql -SHOW DNODES; -``` - -Below is the example output of this command. - -``` -taos> show dnodes; - id | end_point | vnodes | cores | status | role | create_time | offline reason | -====================================================================================================================================== - 1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | | -Query OK, 1 row(s) in set (0.008298s) -``` - -## Show VGROUPs - -To utilize system resources efficiently and provide scalability, data sharding is required. The data of each database is divided into multiple shards and stored in multiple vnodes. These vnodes may be located on different dnodes. One way of scaling out is to add more vnodes on dnodes. Each vnode can only be used for a single DB, but one DB can have multiple vnodes. The allocation of vnode is scheduled automatically by mnode based on system resources of the dnodes. - -Launch TDengine CLI `taos` and execute below command: - -```sql -USE SOME_DATABASE; -SHOW VGROUPS; -``` - -The output is like below: - -taos> use db; -Database changed. - -taos> show vgroups; -vgId | tables | status | onlines | v1_dnode | v1_status | compacting | -========================================================================================== -14 | 38000 | ready | 1 | 1 | leader | 0 | -15 | 38000 | ready | 1 | 1 | leader | 0 | -16 | 38000 | ready | 1 | 1 | leader | 0 | -17 | 38000 | ready | 1 | 1 | leader | 0 | -18 | 37001 | ready | 1 | 1 | leader | 0 | -19 | 37000 | ready | 1 | 1 | leader | 0 | -20 | 37000 | ready | 1 | 1 | leader | 0 | -21 | 37000 | ready | 1 | 1 | leader | 0 | -Query OK, 8 row(s) in set (0.001154s) - -```` - -## Add DNODE - -Launch TDengine CLI `taos` and execute the command below to add the end point of a new dnode into the EPI (end point) list of the cluster. "fqdn:port" must be quoted using double quotes. - -```sql -CREATE DNODE "fqdn:port"; -```` - -The example output is as below: - -``` -taos> create dnode "localhost:7030"; -Query OK, 0 of 0 row(s) in database (0.008203s) - -taos> show dnodes; - id | end_point | vnodes | cores | status | role | create_time | offline reason | -====================================================================================================================================== - 1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | | - 2 | localhost:7030 | 0 | 0 | offline | any | 2022-04-19 08:11:42.158 | status not received | -Query OK, 2 row(s) in set (0.001017s) -``` - -It can be seen that the status of the new dnode is "offline". Once the dnode is started and connects to the firstEp of the cluster, you can execute the command again and get the example output below. As can be seen, both dnodes are in "ready" status. - -``` -taos> show dnodes; - id | end_point | vnodes | cores | status | role | create_time | offline reason | -====================================================================================================================================== - 1 | localhost:6030 | 3 | 8 | ready | any | 2022-04-15 08:27:09.359 | | - 2 | localhost:7030 | 6 | 8 | ready | any | 2022-04-19 08:14:59.165 | | -Query OK, 2 row(s) in set (0.001316s) -``` - -## Drop DNODE - -Launch TDengine CLI `taos` and execute the command below to drop or remove a dnode from the cluster. In the command, you can get `dnodeId` from `show dnodes`. - -```sql -DROP DNODE "fqdn:port"; -``` - -or - -```sql -DROP DNODE dnodeId; -``` - -The example output is below: - -``` -taos> show dnodes; - id | end_point | vnodes | cores | status | role | create_time | offline reason | -====================================================================================================================================== - 1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | | - 2 | localhost:7030 | 0 | 0 | offline | any | 2022-04-19 08:11:42.158 | status not received | -Query OK, 2 row(s) in set (0.001017s) - -taos> drop dnode 2; -Query OK, 0 of 0 row(s) in database (0.000518s) - -taos> show dnodes; - id | end_point | vnodes | cores | status | role | create_time | offline reason | -====================================================================================================================================== - 1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | | -Query OK, 1 row(s) in set (0.001137s) -``` - -In the above example, when `show dnodes` is executed the first time, two dnodes are shown. After `drop dnode 2` is executed, you can execute `show dnodes` again and it can be seen that only the dnode with ID 1 is still in the cluster. - -:::note - -- Once a dnode is dropped, it can't rejoin the cluster. To rejoin, the dnode needs to deployed again after cleaning up the data directory. Before dropping a dnode, the data belonging to the dnode MUST be migrated/backed up according to your data retention, data security or other SOPs. -- Please note that `drop dnode` is different from stopping `taosd` process. `drop dnode` just removes the dnode out of TDengine cluster. Only after a dnode is dropped, can the corresponding `taosd` process be stopped. -- Once a dnode is dropped, other dnodes in the cluster will be notified of the drop and will not accept the request from the dropped dnode. -- dnodeID is allocated automatically and can't be manually modified. dnodeID is generated in ascending order without duplication. - -::: diff --git a/docs/en/10-cluster/_category_.yml b/docs/en/10-cluster/_category_.yml deleted file mode 100644 index 141fd7832631d69efed214293c69cee336bc854d..0000000000000000000000000000000000000000 --- a/docs/en/10-cluster/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: Cluster diff --git a/docs/en/10-deployment/01-deploy.md b/docs/en/10-deployment/01-deploy.md new file mode 100644 index 0000000000000000000000000000000000000000..5dfcd3108d8b10cf24cdd5c852c4225ced0f16b2 --- /dev/null +++ b/docs/en/10-deployment/01-deploy.md @@ -0,0 +1,193 @@ +--- +sidebar_label: Manual Deployment +title: Manual Deployment and Management +--- + +## Prerequisites + +### Step 0 + +The FQDN of all hosts must be setup properly. For e.g. FQDNs may have to be configured in the /etc/hosts file on each host. You must confirm that each FQDN can be accessed from any other host. For e.g. you can do this by using the `ping` command. If you have a DNS server on your network, contact your network administrator for assistance. + +### Step 1 + +If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](/operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`. + +:::note +FQDN information is written to file. If you have started TDengine without configuring or changing the FQDN, ensure that data is backed up or no longer needed before running the `rm -rf /var/lib\taos/\*` command. +::: + +:::note +- The host where the client program runs also needs to be configured properly for FQDN, to make sure all hosts for client or server can be accessed from any other. In other words, the hosts where the client is running are also considered as a part of the cluster. +::: + +### Step 2 + +- Please ensure that your firewall rules do not block TCP/UDP on ports 6030-6042 on all hosts in the cluster. + +### Step 3 + +Now it's time to install TDengine on all hosts but without starting `taosd`. Note that the versions on all hosts should be same. If you are prompted to input the existing TDengine cluster, simply press carriage return to ignore the prompt. + +### Step 4 + +Now each physical node (referred to, hereinafter, as `dnode` which is an abbreviation for "data node") of TDengine needs to be configured properly. + +To get the hostname on any host, the command `hostname -f` can be executed. + +`ping ` command can be executed on each host to check whether any other host is accessible from it. If any host is not accessible, the network configuration, like /etc/hosts or DNS configuration, needs to be checked and revised, to make any two hosts accessible to each other. Hosts that are not accessible to each other cannot form a cluster. + +On the physical machine running the application, ping the dnode that is running taosd. If the dnode is not accessible, the application cannot connect to taosd. In this case, verify the DNS and hosts settings on the physical node running the application. + +The end point of each dnode is the output hostname and port, such as h1.tdengine.com:6030. + +### Step 5 + +Modify the TDengine configuration file `/etc/taos/taos.cfg` on each node. Assuming the first dnode of TDengine cluster is "h1.tdengine.com:6030", its `taos.cfg` is configured as following. + +```c +// firstEp is the end point to connect to when any dnode starts +firstEp h1.tdengine.com:6030 + +// must be configured to the FQDN of the host where the dnode is launched +fqdn h1.tdengine.com + +// the port used by the dnode, default is 6030 +serverPort 6030 + +``` + +`firstEp` and `fqdn` must be configured properly. In `taos.cfg` of all dnodes in TDengine cluster, `firstEp` must be configured to point to same address, i.e. the first dnode of the cluster. `fqdn` and `serverPort` compose the address of each node itself. Retain the default values for other parameters. + +For all the dnodes in a TDengine cluster, the below parameters must be configured exactly the same, any node whose configuration is different from dnodes already in the cluster can't join the cluster. + +| **#** | **Parameter** | **Definition** | +| ----- | ------------------ | ------------------------------------------- | +| 1 | statusInterval | The interval by which dnode reports its status to mnode | +| 2 | timezone | Timezone | +| 3 | locale | System region and encoding | +| 4 | charset | Character set | + +## Start Cluster + +The first dnode can be started following the instructions in [Get Started](/get-started/). Then TDengine CLI `taos` can be launched to execute command `show dnodes`, the output is as following for example: + +``` +taos> show dnodes; +id | endpoint | vnodes | support_vnodes | status | create_time | note | +============================================================================================================================================ +1 | h1.tdengine.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | | +Query OK, 1 rows affected (0.007984s) + + +``` + +From the above output, it is shown that the end point of the started dnode is "h1.tdengine.com:6030", which is the `firstEp` of the cluster. + +## Add DNODE + +There are a few steps necessary to add other dnodes in the cluster. + +Second, we can start `taosd` as instructed in [Get Started](/get-started/). + +Then, on the first dnode i.e. h1.tdengine.com in our example, use TDengine CLI `taos` to execute the following command: + +```sql +CREATE DNODE "h2.taos.com:6030"; +```` + +This adds the end point of the new dnode (from Step 4) into the end point list of the cluster. In the command "fqdn:port" should be quoted using double quotes. Change `"h2.taos.com:6030"` to the end point of your new dnode. + +Then on the first dnode h1.tdengine.com, execute `show dnodes` in `taos` + +```sql +SHOW DNODES; +``` + +to show whether the second dnode has been added in the cluster successfully or not. If the status of the newly added dnode is offline, please check: + +- Whether the `taosd` process is running properly or not +- In the log file `taosdlog.0` to see whether the fqdn and port are correct and add the correct end point if not. +The above process can be repeated to add more dnodes in the cluster. + +:::tip + +Any node that is in the cluster and online can be the firstEp of new nodes. +Nodes use the firstEp parameter only when joining a cluster for the first time. After a node has joined the cluster, it stores the latest mnode in its end point list and no longer makes use of firstEp. + +However, firstEp is used by clients that connect to the cluster. For example, if you run TDengine CLI `taos` without arguments, it connects to the firstEp by default. + +Two dnodes that are launched without a firstEp value operate independently of each other. It is not possible to add one dnode to the other dnode and form a cluster. It is also not possible to form two independent clusters into a new cluster. + +::: + +## Show DNODEs + +The below command can be executed in TDengine CLI `taos` + +```sql +SHOW DNODES; +``` + +to list all dnodes in the cluster, including ID, end point (fqdn:port), status (ready, offline), number of vnodes, number of free vnodes and so on. We recommend executing this command after adding or removing a dnode. + +Below is the example output of this command. + +``` +taos> show dnodes; + id | endpoint | vnodes | support_vnodes | status | create_time | note | +============================================================================================================================================ + 1 | trd01:6030 | 100 | 1024 | ready | 2022-07-15 16:47:47.726 | | +Query OK, 1 rows affected (0.006684s) +``` + +## Show VGROUPs + +To utilize system resources efficiently and provide scalability, data sharding is required. The data of each database is divided into multiple shards and stored in multiple vnodes. These vnodes may be located on different dnodes. One way of scaling out is to add more vnodes on dnodes. Each vnode can only be used for a single DB, but one DB can have multiple vnodes. The allocation of vnode is scheduled automatically by mnode based on system resources of the dnodes. + +Launch TDengine CLI `taos` and execute below command: + +```sql +USE SOME_DATABASE; +SHOW VGROUPS; +``` + +The example output is below: + +``` +taos> use db; +Database changed. + +taos> show vgroups; + vgroup_id | db_name | tables | v1_dnode | v1_status | v2_dnode | v2_status | v3_dnode | v3_status | status | nfiles | file_size | tsma | +================================================================================================================================================================================================ + 2 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 | + 3 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 | + 4 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 | +Query OK, 8 row(s) in set (0.001154s) +``` + +## Drop DNODE + +Before running the TDengine CLI, ensure that the taosd process has been stopped on the dnode that you want to delete. + +```sql +DROP DNODE "fqdn:port"; +``` + +or + +```sql +DROP DNODE dnodeId; +``` + +to drop or remove a dnode from the cluster. In the command, you can get `dnodeId` from `show dnodes`. + +:::warning + +- Once a dnode is dropped, it can't rejoin the cluster. To rejoin, the dnode needs to deployed again after cleaning up the data directory. Before dropping a dnode, the data belonging to the dnode MUST be migrated/backed up according to your data retention, data security or other SOPs. +- Please note that `drop dnode` is different from stopping `taosd` process. `drop dnode` just removes the dnode out of TDengine cluster. Only after a dnode is dropped, can the corresponding `taosd` process be stopped. +- Once a dnode is dropped, other dnodes in the cluster will be notified of the drop and will not accept the request from the dropped dnode. +- dnodeID is allocated automatically and can't be manually modified. dnodeID is generated in ascending order without duplication. + +::: diff --git a/docs/en/10-deployment/03-k8s.md b/docs/en/10-deployment/03-k8s.md new file mode 100644 index 0000000000000000000000000000000000000000..b0aa6777130864404e97dc332cf0e5ce830bf8ed --- /dev/null +++ b/docs/en/10-deployment/03-k8s.md @@ -0,0 +1,393 @@ +--- +sidebar_label: Kubernetes +title: Deploying a TDengine Cluster in Kubernetes +--- + +TDengine is a cloud-native time-series database that can be deployed on Kubernetes. This document gives a step-by-step description of how you can use YAML files to create a TDengine cluster and introduces common operations for TDengine in a Kubernetes environment. + +## Prerequisites + +Before deploying TDengine on Kubernetes, perform the following: + +* Current steps are compatible with Kubernetes v1.5 and later version. +* Install and configure minikube, kubectl, and helm. +* Install and deploy Kubernetes and ensure that it can be accessed and used normally. Update any container registries or other services as necessary. + +You can download the configuration files in this document from [GitHub](https://github.com/taosdata/TDengine-Operator/tree/3.0/src/tdengine). + +## Configure the service + +Create a service configuration file named `taosd-service.yaml`. Record the value of `metadata.name` (in this example, `taos`) for use in the next step. Add the ports required by TDengine: + +```yaml +--- +apiVersion: v1 +kind: Service +metadata: + name: "taosd" + labels: + app: "tdengine" +spec: + ports: + - name: tcp6030 + - protocol: "TCP" + port: 6030 + - name: tcp6041 + - protocol: "TCP" + port: 6041 + selector: + app: "tdengine" +``` + +## Configure the service as StatefulSet + +Configure the TDengine service as a StatefulSet. +Create the `tdengine.yaml` file and set `replicas` to 3. In this example, the region is set to Asia/Shanghai and 10 GB of standard storage are allocated per node. You can change the configuration based on your environment and business requirements. + +```yaml +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: "tdengine" + labels: + app: "tdengine" +spec: + serviceName: "taosd" + replicas: 3 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: "tdengine" + template: + metadata: + name: "tdengine" + labels: + app: "tdengine" + spec: + containers: + - name: "tdengine" + image: "tdengine/tdengine:3.0.0.0" + imagePullPolicy: "IfNotPresent" + ports: + - name: tcp6030 + - protocol: "TCP" + containerPort: 6030 + - name: tcp6041 + - protocol: "TCP" + containerPort: 6041 + env: + # POD_NAME for FQDN config + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + # SERVICE_NAME and NAMESPACE for fqdn resolve + - name: SERVICE_NAME + value: "taosd" + - name: STS_NAME + value: "tdengine" + - name: STS_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # TZ for timezone settings, we recommend to always set it. + - name: TZ + value: "Asia/Shanghai" + # TAOS_ prefix will configured in taos.cfg, strip prefix and camelCase. + - name: TAOS_SERVER_PORT + value: "6030" + # Must set if you want a cluster. + - name: TAOS_FIRST_EP + value: "$(STS_NAME)-0.$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local:$(TAOS_SERVER_PORT)" + # TAOS_FQDN should always be set in k8s env. + - name: TAOS_FQDN + value: "$(POD_NAME).$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local" + volumeMounts: + - name: taosdata + mountPath: /var/lib/taos + readinessProbe: + exec: + command: + - taos-check + initialDelaySeconds: 5 + timeoutSeconds: 5000 + livenessProbe: + exec: + command: + - taos-check + initialDelaySeconds: 15 + periodSeconds: 20 + volumeClaimTemplates: + - metadata: + name: taosdata + spec: + accessModes: + - "ReadWriteOnce" + storageClassName: "standard" + resources: + requests: + storage: "10Gi" +``` + +## Use kubectl to deploy TDengine + +Run the following commands: + +```bash +kubectl apply -f taosd-service.yaml +kubectl apply -f tdengine.yaml +``` + +The preceding configuration generates a TDengine cluster with three nodes in which dnodes are automatically configured. You can run the `show dnodes` command to query the nodes in the cluster: + +```bash +kubectl exec -i -t tdengine-0 -- taos -s "show dnodes" +kubectl exec -i -t tdengine-1 -- taos -s "show dnodes" +kubectl exec -i -t tdengine-2 -- taos -s "show dnodes" +``` + +The output is as follows: + +``` +taos> show dnodes + id | endpoint | vnodes | support_vnodes | status | create_time | note | +============================================================================================================================================ + 1 | tdengine-0.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:14:57.285 | | + 2 | tdengine-1.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:11.302 | | + 3 | tdengine-2.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:23.290 | | +Query OK, 3 rows in database (0.003655s) +``` + +## Enable port forwarding + +The kubectl port forwarding feature allows applications to access the TDengine cluster running on Kubernetes. + +``` +kubectl port-forward tdengine-0 6041:6041 & +``` + +Use curl to verify that the TDengine REST API is working on port 6041: + +``` +$ curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql +Handling connection for 6041 +{"code":0,"column_meta":[["name","VARCHAR",64],["create_time","TIMESTAMP",8],["vgroups","SMALLINT",2],["ntables","BIGINT",8],["replica","TINYINT",1],["strict","VARCHAR",4],["duration","VARCHAR",10],["keep","VARCHAR",32],["buffer","INT",4],["pagesize","INT",4],["pages","INT",4],["minrows","INT",4],["maxrows","INT",4],["comp","TINYINT",1],["precision","VARCHAR",2],["status","VARCHAR",10],["retention","VARCHAR",60],["single_stable","BOOL",1],["cachemodel","VARCHAR",11],["cachesize","INT",4],["wal_level","TINYINT",1],["wal_fsync_period","INT",4],["wal_retention_period","INT",4],["wal_retention_size","BIGINT",8],["wal_roll_period","INT",4],["wal_segment_size","BIGINT",8]],"data":[["information_schema",null,null,16,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null],["performance_schema",null,null,10,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null]],"rows":2} +``` + +## Enable the dashboard for visualization + + The minikube dashboard command enables visualized cluster management. + +``` +$ minikube dashboard +* Verifying dashboard health ... +* Launching proxy ... +* Verifying proxy health ... +* Opening http://127.0.0.1:46617/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ in your default browser... +http://127.0.0.1:46617/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ +``` + +In some public clouds, minikube cannot be remotely accessed if it is bound to 127.0.0.1. In this case, use the kubectl proxy command to map the port to 0.0.0.0. Then, you can access the dashboard by using a web browser to open the dashboard URL above on the public IP address and port of the virtual machine. + +``` +$ kubectl proxy --accept-hosts='^.*$' --address='0.0.0.0' +``` + +## Scaling Out Your Cluster + +TDengine clusters can scale automatically: + +```bash +kubectl scale statefulsets tdengine --replicas=4 +``` + +The preceding command increases the number of replicas to 4. After running this command, query the pod status: + +```bash +kubectl get pods -l app=tdengine +``` + +The output is as follows: + +``` +NAME READY STATUS RESTARTS AGE +tdengine-0 1/1 Running 0 161m +tdengine-1 1/1 Running 0 161m +tdengine-2 1/1 Running 0 32m +tdengine-3 1/1 Running 0 32m +``` + +The status of all pods is Running. Once the pod status changes to Ready, you can check the dnode status: + +```bash +kubectl exec -i -t tdengine-3 -- taos -s "show dnodes" +``` + +The following output shows that the TDengine cluster has been expanded to 4 replicas: + +``` +taos> show dnodes + id | endpoint | vnodes | support_vnodes | status | create_time | note | +============================================================================================================================================ + 1 | tdengine-0.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:14:57.285 | | + 2 | tdengine-1.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:11.302 | | + 3 | tdengine-2.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:23.290 | | + 4 | tdengine-3.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:33:16.039 | | +Query OK, 4 rows in database (0.008377s) +``` + +## Scaling In Your Cluster + +When you scale in a TDengine cluster, your data is migrated to different nodes. You must run the drop dnodes command in TDengine to remove dnodes before scaling in your Kubernetes environment. + +Note: In a Kubernetes StatefulSet service, the newest pods are always removed first. For this reason, when you scale in your TDengine cluster, ensure that you drop the newest dnodes. + +``` +$ kubectl exec -i -t tdengine-0 -- taos -s "drop dnode 4" +``` + +```bash +$ kubectl exec -it tdengine-0 -- taos -s "show dnodes" + +taos> show dnodes + id | endpoint | vnodes | support_vnodes | status | create_time | note | +============================================================================================================================================ + 1 | tdengine-0.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:14:57.285 | | + 2 | tdengine-1.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:11.302 | | + 3 | tdengine-2.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:23.290 | | +Query OK, 3 rows in database (0.004861s) +``` + +Verify that the dnode have been successfully removed by running the `kubectl exec -i -t tdengine-0 -- taos -s "show dnodes"` command. Then run the following command to remove the pod: + +``` +kubectl scale statefulsets tdengine --replicas=3 +``` + +The newest pod in the deployment is removed. Run the `kubectl get pods -l app=tdengine` command to query the pod status: + +``` +$ kubectl get pods -l app=tdengine +NAME READY STATUS RESTARTS AGE +tdengine-0 1/1 Running 0 4m7s +tdengine-1 1/1 Running 0 3m55s +tdengine-2 1/1 Running 0 2m28s +``` + +After the pod has been removed, manually delete the PersistentVolumeClaim (PVC). Otherwise, future scale-outs will attempt to use existing data. + +```bash +$ kubectl delete pvc taosdata-tdengine-3 +``` + +Your cluster has now been safely scaled in, and you can scale it out again as necessary. + +```bash +$ kubectl scale statefulsets tdengine --replicas=4 +statefulset.apps/tdengine scaled +it@k8s-2:~/TDengine-Operator/src/tdengine$ kubectl get pods -l app=tdengine +NAME READY STATUS RESTARTS AGE +tdengine-0 1/1 Running 0 35m +tdengine-1 1/1 Running 0 34m +tdengine-2 1/1 Running 0 12m +tdengine-3 0/1 ContainerCreating 0 4s +it@k8s-2:~/TDengine-Operator/src/tdengine$ kubectl get pods -l app=tdengine +NAME READY STATUS RESTARTS AGE +tdengine-0 1/1 Running 0 35m +tdengine-1 1/1 Running 0 34m +tdengine-2 1/1 Running 0 12m +tdengine-3 0/1 Running 0 7s +it@k8s-2:~/TDengine-Operator/src/tdengine$ kubectl exec -it tdengine-0 -- taos -s "show dnodes" + +taos> show dnodes +id | endpoint | vnodes | support_vnodes | status | create_time | offline reason | +====================================================================================================================================== +1 | tdengine-0.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 17:38:49.012 | | +2 | tdengine-1.taosd.default.sv... | 1 | 4 | ready | 2022-07-25 17:39:01.517 | | +5 | tdengine-2.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 18:01:36.479 | | +6 | tdengine-3.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 18:13:54.411 | | +Query OK, 4 row(s) in set (0.001348s) +``` + +## Remove a TDengine Cluster + +To fully remove a TDengine cluster, you must delete its statefulset, svc, configmap, and pvc entries: + +```bash +kubectl delete statefulset -l app=tdengine +kubectl delete svc -l app=tdengine +kubectl delete pvc -l app=tdengine +kubectl delete configmap taoscfg + +``` + +## Troubleshooting + +### Error 1 + +If you remove a pod without first running `drop dnode`, some TDengine nodes will go offline. + +``` +$ kubectl exec -it tdengine-0 -- taos -s "show dnodes" + +taos> show dnodes +id | endpoint | vnodes | support_vnodes | status | create_time | offline reason | +====================================================================================================================================== +1 | tdengine-0.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 17:38:49.012 | | +2 | tdengine-1.taosd.default.sv... | 1 | 4 | ready | 2022-07-25 17:39:01.517 | | +5 | tdengine-2.taosd.default.sv... | 0 | 4 | offline | 2022-07-25 18:01:36.479 | status msg timeout | +6 | tdengine-3.taosd.default.sv... | 0 | 4 | offline | 2022-07-25 18:13:54.411 | status msg timeout | +Query OK, 4 row(s) in set (0.001323s) +``` + +### Error 2 + +If the number of nodes after a scale-in is less than the value of the replica parameter, the cluster will go down: + +Create a database with replica set to 2 and add data. + +```bash +kubectl exec -i -t tdengine-0 -- \ + taos -s \ + "create database if not exists test replica 2; + use test; + create table if not exists t1(ts timestamp, n int); + insert into t1 values(now, 1)(now+1s, 2);" + + +``` + +Scale in to one node: + +```bash +kubectl scale statefulsets tdengine --replicas=1 + +``` + +In the TDengine CLI, you can see that no database operations succeed: + +``` +taos> show dnodes; + id | end_point | vnodes | cores | status | role | create_time | offline reason | +====================================================================================================================================== + 1 | tdengine-0.taosd.default.sv... | 2 | 40 | ready | any | 2021-06-01 15:55:52.562 | | + 2 | tdengine-1.taosd.default.sv... | 1 | 40 | offline | any | 2021-06-01 15:56:07.212 | status msg timeout | +Query OK, 2 row(s) in set (0.000845s) + +taos> show dnodes; + id | end_point | vnodes | cores | status | role | create_time | offline reason | +====================================================================================================================================== + 1 | tdengine-0.taosd.default.sv... | 2 | 40 | ready | any | 2021-06-01 15:55:52.562 | | + 2 | tdengine-1.taosd.default.sv... | 1 | 40 | offline | any | 2021-06-01 15:56:07.212 | status msg timeout | +Query OK, 2 row(s) in set (0.000837s) + +taos> use test; +Database changed. + +taos> insert into t1 values(now, 3); + +DB error: Unable to resolve FQDN (0.013874s) + +``` diff --git a/docs/en/10-deployment/05-helm.md b/docs/en/10-deployment/05-helm.md new file mode 100644 index 0000000000000000000000000000000000000000..a4fa68100078efe85fff5e1b078ebd07e5337d5a --- /dev/null +++ b/docs/en/10-deployment/05-helm.md @@ -0,0 +1,298 @@ +--- +sidebar_label: Helm +title: Use Helm to deploy TDengine +--- + +Helm is a package manager for Kubernetes that can provide more capabilities in deploying on Kubernetes. + +## Install Helm + +```bash +curl -fsSL -o get_helm.sh \ + https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 +chmod +x get_helm.sh +./get_helm.sh + +``` + +Helm uses the kubectl and kubeconfig configurations to perform Kubernetes operations. For more information, see the Rancher configuration for Kubernetes installation. + +## Install TDengine Chart + +To use TDengine Chart, download it from GitHub: + +```bash +wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.0.tgz + +``` + +Query the storageclass of your Kubernetes deployment: + +```bash +kubectl get storageclass + +``` + +With minikube, the default value is standard. + +Use Helm commands to install TDengine: + +```bash +helm install tdengine tdengine-3.0.0.tgz \ + --set storage.className= + +``` + +You can configure a small storage size in minikube to ensure that your deployment does not exceed your available disk space. + +```bash +helm install tdengine tdengine-3.0.0.tgz \ + --set storage.className=standard \ + --set storage.dataSize=2Gi \ + --set storage.logSize=10Mi + +``` + +After TDengine is deployed, TDengine Chart outputs information about how to use TDengine: + +```bash +export POD_NAME=$(kubectl get pods --namespace default \ + -l "app.kubernetes.io/name=tdengine,app.kubernetes.io/instance=tdengine" \ + -o jsonpath="{.items[0].metadata.name}") +kubectl --namespace default exec $POD_NAME -- taos -s "show dnodes; show mnodes" +kubectl --namespace default exec -it $POD_NAME -- taos + +``` + +You can test the deployment by creating a table: + +```bash +kubectl --namespace default exec $POD_NAME -- \ + taos -s "create database test; + use test; + create table t1 (ts timestamp, n int); + insert into t1 values(now, 1)(now + 1s, 2); + select * from t1;" + +``` + +## Configuring Values + +You can configure custom parameters in TDengine with the `values.yaml` file. + +Run the `helm show values` command to see all parameters supported by TDengine Chart. + +```bash +helm show values tdengine-3.0.0.tgz + +``` + +Save the output of this command as `values.yaml`. Then you can modify this file with your desired values and use it to deploy a TDengine cluster: + +```bash +helm install tdengine tdengine-3.0.0.tgz -f values.yaml + +``` + +The parameters are described as follows: + +```yaml +# Default values for tdengine. +# This is a YAML-formatted file. +# Declare variables to be passed into helm templates. + +replicaCount: 1 + +image: + prefix: tdengine/tdengine + #pullPolicy: Always + # Overrides the image tag whose default is the chart appVersion. +# tag: "3.0.0.0" + +service: + # ClusterIP is the default service type, use NodeIP only if you know what you are doing. + type: ClusterIP + ports: + # TCP range required + tcp: [6030, 6041, 6042, 6043, 6044, 6046, 6047, 6048, 6049, 6060] + # UDP range + udp: [6044, 6045] + + +# Set timezone here, not in taoscfg +timezone: "Asia/Shanghai" + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +storage: + # Set storageClassName for pvc. K8s use default storage class if not set. + # + className: "" + dataSize: "100Gi" + logSize: "10Gi" + +nodeSelectors: + taosd: + # node selectors + +clusterDomainSuffix: "" +# Config settings in taos.cfg file. +# +# The helm/k8s support will use environment variables for taos.cfg, +# converting an upper-snake-cased variable like `TAOS_DEBUG_FLAG`, +# to a camelCase taos config variable `debugFlag`. +# +# See the [Configuration Variables](../../reference/config) +# +# Note: +# 1. firstEp/secondEp: should not be setted here, it's auto generated at scale-up. +# 2. serverPort: should not be setted, we'll use the default 6030 in many places. +# 3. fqdn: will be auto generated in kubenetes, user should not care about it. +# 4. role: currently role is not supported - every node is able to be mnode and vnode. +# +# Btw, keep quotes "" around the value like below, even the value will be number or not. +taoscfg: + # Starts as cluster or not, must be 0 or 1. + # 0: all pods will start as a seperate TDengine server + # 1: pods will start as TDengine server cluster. [default] + CLUSTER: "1" + + # number of replications, for cluster only + TAOS_REPLICA: "1" + + # + # TAOS_NUM_OF_RPC_THREADS: number of threads for RPC + #TAOS_NUM_OF_RPC_THREADS: "2" + + + # + # TAOS_NUM_OF_COMMIT_THREADS: number of threads to commit cache data + #TAOS_NUM_OF_COMMIT_THREADS: "4" + + # enable/disable installation / usage report + #TAOS_TELEMETRY_REPORTING: "1" + + # time interval of system monitor, seconds + #TAOS_MONITOR_INTERVAL: "30" + + # time interval of dnode status reporting to mnode, seconds, for cluster only + #TAOS_STATUS_INTERVAL: "1" + + # time interval of heart beat from shell to dnode, seconds + #TAOS_SHELL_ACTIVITY_TIMER: "3" + + # minimum sliding window time, milli-second + #TAOS_MIN_SLIDING_TIME: "10" + + # minimum time window, milli-second + #TAOS_MIN_INTERVAL_TIME: "1" + + # the compressed rpc message, option: + # -1 (no compression) + # 0 (all message compressed), + # > 0 (rpc message body which larger than this value will be compressed) + #TAOS_COMPRESS_MSG_SIZE: "-1" + + # max number of connections allowed in dnode + #TAOS_MAX_SHELL_CONNS: "50000" + + # stop writing logs when the disk size of the log folder is less than this value + #TAOS_MINIMAL_LOG_DIR_G_B: "0.1" + + # stop writing temporary files when the disk size of the tmp folder is less than this value + #TAOS_MINIMAL_TMP_DIR_G_B: "0.1" + + # if disk free space is less than this value, taosd service exit directly within startup process + #TAOS_MINIMAL_DATA_DIR_G_B: "0.1" + + # One mnode is equal to the number of vnode consumed + #TAOS_MNODE_EQUAL_VNODE_NUM: "4" + + # enbale/disable http service + #TAOS_HTTP: "1" + + # enable/disable system monitor + #TAOS_MONITOR: "1" + + # enable/disable async log + #TAOS_ASYNC_LOG: "1" + + # + # time of keeping log files, days + #TAOS_LOG_KEEP_DAYS: "0" + + # The following parameters are used for debug purpose only. + # debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR + # 131: output warning and error + # 135: output debug, warning and error + # 143: output trace, debug, warning and error to log + # 199: output debug, warning and error to both screen and file + # 207: output trace, debug, warning and error to both screen and file + # + # debug flag for all log type, take effect when non-zero value\ + #TAOS_DEBUG_FLAG: "143" + + # generate core file when service crash + #TAOS_ENABLE_CORE_FILE: "1" +``` + +## Scaling Out + +For information about scaling out your deployment, see Kubernetes. Additional Helm-specific is described as follows. + +First, obtain the name of the StatefulSet service for your deployment. + +```bash +export STS_NAME=$(kubectl get statefulset \ + -l "app.kubernetes.io/name=tdengine" \ + -o jsonpath="{.items[0].metadata.name}") + +``` + +You can scale out your deployment by adding replicas. The following command scales a deployment to three nodes: + +```bash +kubectl scale --replicas 3 statefulset/$STS_NAME + +``` + +Run the `show dnodes` and `show mnodes` commands to check whether the scale-out was successful. + +## Scaling In + +:::warning +Exercise caution when scaling in a cluster. + +::: + +Determine which dnodes you want to remove and drop them manually. + +```bash +kubectl --namespace default exec $POD_NAME -- \ + cat /var/lib/taos/dnode/dnodeEps.json \ + | jq '.dnodeInfos[1:] |map(.dnodeFqdn + ":" + (.dnodePort|tostring)) | .[]' -r +kubectl --namespace default exec $POD_NAME -- taos -s "show dnodes" +kubectl --namespace default exec $POD_NAME -- taos -s 'drop dnode ""' + +``` + +## Remove a TDengine Cluster + +You can use Helm to remove your cluster: + +```bash +helm uninstall tdengine + +``` + +However, Helm does not remove PVCs automatically. After you remove your cluster, manually remove all PVCs. diff --git a/docs/en/10-deployment/_category_.yml b/docs/en/10-deployment/_category_.yml new file mode 100644 index 0000000000000000000000000000000000000000..0bb1ba461bd4c0e350c60fa3a8bc7723429a3f9f --- /dev/null +++ b/docs/en/10-deployment/_category_.yml @@ -0,0 +1 @@ +label: Deployment diff --git a/docs/en/10-cluster/index.md b/docs/en/10-deployment/index.md similarity index 78% rename from docs/en/10-cluster/index.md rename to docs/en/10-deployment/index.md index 5a45a2ce7b08c67322265cf1bbd54ef66cbfc027..7054a33e4a40222ed5eb9a15837990e3e7a81cff 100644 --- a/docs/en/10-cluster/index.md +++ b/docs/en/10-deployment/index.md @@ -1,11 +1,10 @@ --- -title: Cluster -keywords: ["cluster", "high availability", "load balance", "scale out"] +title: Deployment --- TDengine has a native distributed design and provides the ability to scale out. A few nodes can form a TDengine cluster. If you need higher processing power, you just need to add more nodes into the cluster. TDengine uses virtual node technology to virtualize a node into multiple virtual nodes to achieve load balancing. At the same time, TDengine can group virtual nodes on different nodes into virtual node groups, and use the replication mechanism to ensure the high availability of the system. The cluster feature of TDengine is completely open source. -This chapter mainly introduces cluster deployment, maintenance, and how to achieve high availability and load balancing. +This document describes how to manually deploy a cluster on a host as well as how to deploy on Kubernetes and by using Helm. ```mdx-code-block import DocCardList from '@theme/DocCardList'; diff --git a/docs/en/12-taos-sql/01-data-type.md b/docs/en/12-taos-sql/01-data-type.md index d038219c8ac66db52416001f7a79c71018e2ca33..876de50f35ee3ba533bd7d5916632de853a84c0e 100644 --- a/docs/en/12-taos-sql/01-data-type.md +++ b/docs/en/12-taos-sql/01-data-type.md @@ -1,16 +1,17 @@ --- +sidebar_label: Data Types title: Data Types description: "TDengine supports a variety of data types including timestamp, float, JSON and many others." --- -## TIMESTAMP +## Timestamp When using TDengine to store and query data, the most important part of the data is timestamp. Timestamp must be specified when creating and inserting data rows. Timestamp must follow the rules below: - The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128` - Internal function `now` can be used to get the current timestamp on the client side - The current timestamp of the client side is applied when `now` is used to insert data -- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from 1970-01-01 00:00:00.000 (UTC/GMT) +- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00. - Add/subtract operations can be carried out on timestamps. For example `now-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations. Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanonseconds. @@ -18,52 +19,54 @@ Time precision in TDengine can be set by the `PRECISION` parameter when executin ```sql CREATE DATABASE db_name PRECISION 'ns'; ``` - ## Data Types In TDengine, the data types below can be used when specifying a column or tag. | # | **type** | **Bytes** | **Description** | -| --- | :-------: | --------- | ------------------------- | -| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported | +| --- | :-------: | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported | | 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1] | -| 3 |INT UNSIGNED|4 | Unsigned integer, the value range is [0, 2^31-1] | +| 3 | INT UNSIGNED| 4| unsigned integer, the value range is [0, 2^32-1] | 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1] | -| 5 | BIGINT UNSIGNED | 8 | Unsigned long integer, the value range is [0, 2^63-1] | +| 5 | BIGINT UNSIGNED | 8 | unsigned long integer, the value range is [0, 2^64-1] | | 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] | | 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] | -| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. The string length can be up to 16374 bytes. The string value must be quoted with single quotes. The literal single quote inside the string must be preceded with back slash like `\'` | -| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767] | -| 10 | SMALLINT UNSIGNED | 2 | Unsigned short integer, the value range is [0, 32767] | -| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127] | -| 12 | TINYINT UNSIGNED | 1 | Unsigned single-byte integer, the value range is [0, 127] | -| 13 | BOOL | 1 | Bool, the value range is {true, false} | -| 14 | NCHAR | User Defined| Multi-Byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. | +| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. | +| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767] | +| 10 | INT UNSIGNED| 2| unsigned integer, the value range is [0, 65535]| +| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127] | +| 12 | TINYINT UNSIGNED | 1 | unsigned single-byte integer, the value range is [0, 255] | +| 13 | BOOL | 1 | Bool, the value range is {true, false} | +| 14 | NCHAR | User Defined| Multi-Byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. | | 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type | -| 16 | VARCHAR | User Defined| Alias of BINARY type | +| 16 | VARCHAR | User-defined | Alias of BINARY | + :::note - TDengine is case insensitive and treats any characters in the sql command as lower case by default, case sensitive strings must be quoted with single quotes. -- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type. +- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type. +- The length of BINARY can be up to 16374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'` - Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number. ::: + ## Constants -TDengine supports constants of multiple data type. +TDengine supports a variety of constants: | # | **Syntax** | **Type** | **Description** | | --- | :-------: | --------- | -------------------------------------- | -| 1 | [{+ \| -}]123 | BIGINT | Numeric constants are treated as BIGINT type. The value will be truncated if it exceeds the range of BIGINT type. | -| 2 | 123.45 | DOUBLE | Floating number constants are treated as DOUBLE type. TDengine determines whether it's a floating number based on if decimal point or scientific notation is used. | -| 3 | 1.2E3 | DOUBLE | Constants in scientific notation are treated ad DOUBLE type. | -| 4 | 'abc' | BINARY | String constants enclosed by single quotes are treated as BINARY type. Its size is determined as the acutal length. Single quote itself can be included by preceding backslash, i.e. `\'`, in a string constant. | -| 5 | "abc" | BINARY | String constants enclosed by double quotes are treated as BINARY type. Its size is determined as the acutal length. Double quote itself can be included by preceding backslash, i.e. `\"`, in a string constant. | -| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | A string constant following `TIMESTAMP` keyword is treated as TIMESTAMP type. The string should be in the format of "YYYY-MM-DD HH:mm:ss.MS". Its time precision is same as that of the current database being used. | -| 7 | {TRUE \| FALSE} | BOOL | BOOL type contant. | -| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | NULL constant, it can be used for any type.| +| 1 | [{+ \| -}]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. | +| 2 | 123.45 | DOUBLE | Floating-point literals are of type DOUBLE. Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used. | +| 3 | 1.2E3 | DOUBLE | Literals in scientific notation are of type DOUBLE. | +| 4 | 'abc' | BINARY | Content enclosed in single quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal single quote inside the string must be escaped with a backslash (\'). | +| 5 | 'abc' | BINARY | Content enclosed in double quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal double quote inside the string must be escaped with a backslash (\"). | +| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. | +| 7 | {TRUE \| FALSE} | BOOL | Boolean literals are of type BOOL. | +| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | The preceding characters indicate null literals. These can be used with any data type. | :::note -- TDengine determines whether it's a floating number based on if decimal point or scientific notation is used. So whether the value is determined as overflow depends on both the value and the determined type. For example, 9999999999999999999 is determined as overflow because it exceeds the upper limit of BIGINT type, while 9999999999999999999.0 is considered as a valid floating number because it is within the range of DOUBLE type. +Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number. ::: diff --git a/docs/en/12-taos-sql/02-database.md b/docs/en/12-taos-sql/02-database.md index c2961d62415cd7d23b031777082801426b221190..d9dadae976bf07bbf6cfb49401d55bb0bf18da49 100644 --- a/docs/en/12-taos-sql/02-database.md +++ b/docs/en/12-taos-sql/02-database.md @@ -4,123 +4,153 @@ title: Database description: "create and drop database, show or change database parameters" --- -## Create Database +## Create a Database + +```sql +CREATE DATABASE [IF NOT EXISTS] db_name [database_options] + +database_options: + database_option ... + +database_option: { + BUFFER value + | CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'} + | CACHESIZE value + | COMP {0 | 1 | 2} + | DURATION value + | WAL_FSYNC_PERIOD value + | MAXROWS value + | MINROWS value + | KEEP value + | PAGES value + | PAGESIZE value + | PRECISION {'ms' | 'us' | 'ns'} + | REPLICA value + | RETENTIONS ingestion_duration:keep_duration ... + | STRICT {'off' | 'on'} + | WAL_LEVEL {1 | 2} + | VGROUPS value + | SINGLE_STABLE {0 | 1} + | WAL_RETENTION_PERIOD value + | WAL_ROLL_PERIOD value + | WAL_RETENTION_SIZE value + | WAL_SEGMENT_SIZE value +} +``` + +## Parameters + +- BUFFER: specifies the size (in MB) of the write buffer for each vnode. Enter a value between 3 and 16384. The default value is 96. +- CACHEMODEL: specifies how the latest data in subtables is stored in the cache. The default value is none. + - none: The latest data is not cached. + - last_row: The last row of each subtable is cached. This option significantly improves the performance of the LAST_ROW function. + - last_value: The last non-null value of each column in each subtable is cached. This option significantly improves the performance of the LAST function under normal circumstances, such as statements including the WHERE, ORDER BY, GROUP BY, and INTERVAL keywords. + - both: The last row of each subtable and the last non-null value of each column in each subtable are cached. +- CACHESIZE: specifies the amount (in MB) of memory used for subtable caching on each vnode. Enter a value between 1 and 65536. The default value is 1. +- COMP: specifies how databases are compressed. The default value is 2. + - 0: Compression is disabled. + - 1: One-pass compression is enabled. + - 2: Two-pass compression is enabled. +- DURATION: specifies the time period contained in each data file. After the time specified by this parameter has elapsed, TDengine creates a new data file to store incoming data. You can use m (minutes), h (hours), and d (days) as the unit, for example DURATION 100h or DURATION 10d. If you do not include a unit, d is used by default. +- WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk. +- MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096. +- MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100. +- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. +- PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB. +- PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384. +- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms. +- REPLICA: specifies the number of replicas that are made of the database. Enter 1 or 3. The default value is 1. The value of the REPLICA parameter cannot exceed the number of dnodes in the cluster. +- RETENTIONS: specifies the retention period for data aggregated at various intervals. For example, RETENTIONS 15s:7d,1m:21d,15m:50d indicates that data aggregated every 15 seconds is retained for 7 days, data aggregated every 1 minute is retained for 21 days, and data aggregated every 15 minutes is retained for 50 days. You must enter three aggregation intervals and corresponding retention periods. +- STRICT: specifies whether strong data consistency is enabled. The default value is off. + - on: Strong consistency is enabled and implemented through the Raft consensus algorithm. In this mode, an operation is considered successful once it is confirmed by half of the nodes in the cluster. + - off: Strong consistency is disabled. In this mode, an operation is considered successful when it is initiated by the local node. +- WAL_LEVEL: specifies whether fsync is enabled. The default value is 1. + - 1: WAL is enabled but fsync is disabled. + - 2: WAL and fsync are both enabled. +- VGROUPS: specifies the initial number of vgroups when a database is created. +- SINGLE_STABLE: specifies whether the database can contain more than one supertable. + - 0: The database can contain multiple supertables. + - 1: The database can contain only one supertable. +- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. +- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. +- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. +- WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. After the current WAL file reaches this size, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. + +### Example Statement + +```sql +create database if not exists db vgroups 10 buffer 10 + +``` + +The preceding SQL statement creates a database named db that has 10 vgroups and whose vnodes have a 10 MB cache. + +### Specify the Database in Use ``` -CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1]; +USE db_name; ``` -:::info - -1. KEEP specifies the number of days for which the data in the database will be retained. The default value is 3650 days, i.e. 10 years. The data will be deleted automatically once its age exceeds this threshold. -2. UPDATE specifies whether the data can be updated and how the data can be updated. - 1. UPDATE set to 0 means update operation is not allowed. The update for data with an existing timestamp will be discarded silently and the original record in the database will be preserved as is. - 2. UPDATE set to 1 means the whole row will be updated. The columns for which no value is specified will be set to NULL. - 3. UPDATE set to 2 means updating a subset of columns for a row is allowed. The columns for which no value is specified will be kept unchanged. -3. The maximum length of database name is 33 bytes. -4. The maximum length of a SQL statement is 65,480 bytes. -5. Below are the parameters that can be used when creating a database - - cache: [Description](/reference/config/#cache) - - blocks: [Description](/reference/config/#blocks) - - days: [Description](/reference/config/#days) - - keep: [Description](/reference/config/#keep) - - minRows: [Description](/reference/config/#minrows) - - maxRows: [Description](/reference/config/#maxrows) - - wal: [Description](/reference/config/#wallevel) - - fsync: [Description](/reference/config/#fsync) - - update: [Description](/reference/config/#update) - - cacheLast: [Description](/reference/config/#cachelast) - - replica: [Description](/reference/config/#replica) - - quorum: [Description](/reference/config/#quorum) - - comp: [Description](/reference/config/#comp) - - precision: [Description](/reference/config/#precision) -6. Please note that all of the parameters mentioned in this section are configured in configuration file `taos.cfg` on the TDengine server. If not specified in the `create database` statement, the values from taos.cfg are used by default. To override default parameters, they must be specified in the `create database` statement. - -::: +The preceding SQL statement switches to the specified database. (If you connect to TDengine over the REST API, this statement does not take effect.) -## Show Current Configuration +## Drop a Database ``` -SHOW VARIABLES; +DROP DATABASE [IF EXISTS] db_name ``` -## Specify The Database In Use +The preceding SQL statement deletes the specified database. This statement will delete all tables in the database and destroy all vgroups associated with it. Exercise caution when using this statement. -``` -USE db_name; -``` - -:::note -This way is not applicable when using a REST connection. In a REST connection the database name must be specified before a table or stable name. For e.g. to query the stable "meters" in database "test" the query would be "SELECT count(*) from test.meters" +## Change Database Configuration -::: +```sql +ALTER DATABASE db_name [alter_database_options] -## Drop Database +alter_database_options: + alter_database_option ... -``` -DROP DATABASE [IF EXISTS] db_name; +alter_database_option: { + CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'} + | CACHESIZE value + | WAL_LEVEL value + | WAL_FSYNC_PERIOD value + | KEEP value +} ``` :::note -All data in the database will be deleted too. This command must be used with extreme caution. Please follow your organization's data integrity, data backup, data security or any other applicable SOPs before using this command. +Other parameters cannot be modified after the database has been created. ::: -## Change Database Configuration +## View Databases -Some examples are shown below to demonstrate how to change the configuration of a database. Please note that some configuration parameters can be changed after the database is created, but some cannot. For details of the configuration parameters of database please refer to [Configuration Parameters](/reference/config/). +### View All Databases ``` -ALTER DATABASE db_name COMP 2; -``` - -COMP parameter specifies whether the data is compressed and how the data is compressed. - -``` -ALTER DATABASE db_name REPLICA 2; -``` - -REPLICA parameter specifies the number of replicas of the database. - -``` -ALTER DATABASE db_name KEEP 365; +SHOW DATABASES; ``` -KEEP parameter specifies the number of days for which the data will be kept. +### View the CREATE Statement for a Database ``` -ALTER DATABASE db_name QUORUM 2; +SHOW CREATE DATABASE db_name; ``` -QUORUM parameter specifies the necessary number of confirmations to determine whether the data is written successfully. +The preceding SQL statement can be used in migration scenarios. This command can be used to get the CREATE statement, which can be used in another TDengine instance to create the exact same database. -``` -ALTER DATABASE db_name BLOCKS 100; -``` +### View Database Configuration -BLOCKS parameter specifies the number of memory blocks used by each VNODE. - -``` -ALTER DATABASE db_name CACHELAST 0; +```sql +SHOW DATABASES \G; ``` -CACHELAST parameter specifies whether and how the latest data of a sub table is cached. +The preceding SQL statement shows the value of each parameter for the specified database. One value is displayed per line. -:::tip -The above parameters can be changed using `ALTER DATABASE` command without restarting. For more details of all configuration parameters please refer to [Configuration Parameters](/reference/config/). - -::: +## Delete Expired Data -## Show All Databases - -``` -SHOW DATABASES; -``` - -## Show The Create Statement of A Database - -``` -SHOW CREATE DATABASE db_name; +```sql +TRIM DATABASE db_name; ``` -This command is useful when migrating the data from one TDengine cluster to another. This command can be used to get the CREATE statement, which can be used in another TDengine instance to create the exact same database. +The preceding SQL statement deletes data that has expired and orders the remaining data in accordance with the storage configuration. diff --git a/docs/en/12-taos-sql/03-table.md b/docs/en/12-taos-sql/03-table.md index f065a8e2396583bb7a512446b513ed60056ad55e..5a2c8ed6ee4a5ea129023fec68fa97d577832f60 100644 --- a/docs/en/12-taos-sql/03-table.md +++ b/docs/en/12-taos-sql/03-table.md @@ -1,127 +1,198 @@ --- -sidebar_label: Table title: Table -description: create super table, normal table and sub table, drop tables and change tables --- ## Create Table -``` -CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]); -``` - -:::info +You create standard tables and subtables with the `CREATE TABLE` statement. + +```sql +CREATE TABLE [IF NOT EXISTS] [db_name.]tb_name (create_definition [, create_definitionn] ...) [table_options] + +CREATE TABLE create_subtable_clause + +CREATE TABLE [IF NOT EXISTS] [db_name.]tb_name (create_definition [, create_definitionn] ...) + [TAGS (create_definition [, create_definitionn] ...)] + [table_options] + +create_subtable_clause: { + create_subtable_clause [create_subtable_clause] ... + | [IF NOT EXISTS] [db_name.]tb_name USING [db_name.]stb_name [(tag_name [, tag_name] ...)] TAGS (tag_value [, tag_value] ...) +} + +create_definition: + col_name column_definition + +column_definition: + type_name [comment 'string_value'] + +table_options: + table_option ... + +table_option: { + COMMENT 'string_value' + | WATERMARK duration[,duration] + | MAX_DELAY duration[,duration] + | ROLLUP(func_name [, func_name] ...) + | SMA(col_name [, col_name] ...) + | TTL value +} + +``` + +**More explanations** 1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key. 2. The maximum length of the table name is 192 bytes. 3. The maximum length of each row is 48k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted. 4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive. 5. The maximum length in bytes must be specified when using BINARY or NCHAR types. -6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive. Only ASCII visible characters can be used with escape character. +6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive. For example \`aBc\` and \`abc\` are different table names but `abc` and `aBc` are same table names because they are both converted to `abc` internally. + Only ASCII visible characters can be used with escape character. -::: +**Parameter description** +1. COMMENT: specifies comments for the table. This parameter can be used with supertables, standard tables, and subtables. +2. WATERMARK: specifies the time after which the window is closed. The default value is 5 seconds. Enter a value between 0 and 15 minutes in milliseconds, seconds, or minutes. You can enter multiple values separated by commas (,). This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database. +3. MAX_DELAY: specifies the maximum latency for pushing computation results. The default value is 15 minutes or the value of the INTERVAL parameter, whichever is smaller. Enter a value between 0 and 15 minutes in milliseconds, seconds, or minutes. You can enter multiple values separated by commas (,). Note: Retain the default value if possible. Configuring a small MAX_DELAY may cause results to be frequently pushed, affecting storage and query performance. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database. +4. ROLLUP: specifies aggregate functions to roll up. Rolling up a function provides downsampled results based on multiple axes. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database. You can specify only one function to roll up. The rollup takes effect on all columns except TS. Enter one of the following values: avg, sum, min, max, last, or first. +5. SMA: specifies functions on which to enable small materialized aggregates (SMA). SMA is user-defined precomputation of aggregates based on data blocks. Enter one of the following values: max, min, or sum This parameter can be used with supertables and standard tables. +6. TTL: specifies the time to live (TTL) for the table. If TTL is specified when creatinga table, after the time period for which the table has been existing is over TTL, TDengine will automatically delete the table. Please be noted that the system may not delete the table at the exact moment that the TTL expires but guarantee there is such a system and finally the table will be deleted. The unit of TTL is in days. The default value is 0, i.e. never expire. -### Create Subtable Using STable As Template +## Create Subtables -``` +### Create a Subtable + +```sql CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name TAGS (tag_value1, ...); ``` -The above command creates a subtable using the specified super table as a template and the specified tag values. +### Create a Subtable with Specified Tags -### Create Subtable Using STable As Template With A Subset of Tags - -``` +```sql CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...); ``` -The tags for which no value is specified will be set to NULL. +The preceding SQL statement creates a subtable based on a supertable but specifies a subset of tags to use. Tags that are not included in this subset are assigned a null value. -### Create Tables in Batch +### Create Multiple Subtables -``` +```sql CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...; ``` -This can be used to create a lot of tables in a single SQL statement while making table creation much faster. +You can create multiple subtables in a single SQL statement provided that all subtables use the same supertable. For performance reasons, do not create more than 3000 tables per statement. + +## Modify a Table -:::info +```sql +ALTER TABLE [db_name.]tb_name alter_table_clause + +alter_table_clause: { + alter_table_options + | ADD COLUMN col_name column_type + | DROP COLUMN col_name + | MODIFY COLUMN col_name column_type + | RENAME COLUMN old_col_name new_col_name +} + +alter_table_options: + alter_table_option ... + +alter_table_option: { + TTL value + | COMMENT 'string_value' +} -- Creating tables in batch must use a super table as a template. -- The length of single statement is suggested to be between 1,000 and 3,000 bytes for best performance. +``` -::: +**More explanations** +You can perform the following modifications on existing tables: +1. ADD COLUMN: adds a column to the supertable. +2. DROP COLUMN: deletes a column from the supertable. +3. MODIFY COLUMN: changes the length of the data type specified for the column. Note that you can only specify a length greater than the current length. +4. RENAME COLUMN: renames a specified column in the table. -## Drop Tables +### Add a Column -``` -DROP TABLE [IF EXISTS] tb_name; +```sql +ALTER TABLE tb_name ADD COLUMN field_name data_type; ``` -## Show All Tables In Current Database +### Delete a Column -``` -SHOW TABLES [LIKE tb_name_wildcard]; +```sql +ALTER TABLE tb_name DROP COLUMN field_name; ``` -## Show Create Statement of A Table +### Modify the Data Length +```sql +ALTER TABLE tb_name MODIFY COLUMN field_name data_type(length); ``` -SHOW CREATE TABLE tb_name; -``` - -This is useful when migrating the data in one TDengine cluster to another one because it can be used to create the exact same tables in the target database. -## Show Table Definition +### Rename a Column +```sql +ALTER TABLE tb_name RENAME COLUMN old_col_name new_col_name ``` -DESCRIBE tb_name; + +## Modify a Subtable + +```sql +ALTER TABLE [db_name.]tb_name alter_table_clause + +alter_table_clause: { + alter_table_options + | SET TAG tag_name = new_tag_value +} + +alter_table_options: + alter_table_option ... + +alter_table_option: { + TTL value + | COMMENT 'string_value' +} ``` -## Change Table Definition +**More explanations** +1. Only the value of a tag can be modified directly. For all other modifications, you must modify the supertable from which the subtable was created. -### Add A Column +### Change Tag Value Of Sub Table ``` -ALTER TABLE tb_name ADD COLUMN field_name data_type; +ALTER TABLE tb_name SET TAG tag_name=new_tag_value; ``` -:::info +## Delete a Table -1. The maximum number of columns is 4096, the minimum number of columns is 2. -2. The maximum length of a column name is 64 bytes. +The following SQL statement deletes one or more tables. -::: - -### Remove A Column - -``` -ALTER TABLE tb_name DROP COLUMN field_name; +```sql +DROP TABLE [IF EXISTS] [db_name.]tb_name [, [IF EXISTS] [db_name.]tb_name] ... ``` -:::note -If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, and the change will be automatically applied to all the subtables created using this super table as template. For tables created in the normal way, the table definition can be changed directly on the table. +## View Tables -::: +### View All Tables -### Change Column Length +The following SQL statement shows all tables in the current database. -``` -ALTER TABLE tb_name MODIFY COLUMN field_name data_type(length); +```sql +SHOW TABLES [LIKE tb_name_wildchar]; ``` -If the type of a column is variable length, like BINARY or NCHAR, this command can be used to change the length of the column. +### View the CREATE Statement for a Table -:::note -If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, and the change will be automatically applied to all the subtables created using this super table as template. For tables created in the normal way, the table definition can be changed directly on the table. +``` +SHOW CREATE TABLE tb_name; +``` -::: +This command is useful in migrating data from one TDengine cluster to another because it can be used to create the exact same tables in the target database. -### Change Tag Value Of Sub Table +## View the Table Schema ``` -ALTER TABLE tb_name SET TAG tag_name=new_tag_value; -``` - -This command can be used to change the tag value if the table is created using a super table as template. +DESCRIBE [db_name.]tb_name; +``` \ No newline at end of file diff --git a/docs/en/12-taos-sql/04-stable.md b/docs/en/12-taos-sql/04-stable.md index b8a608792ab327a81129d29ddd0ff44d7af6e6c5..6a0a0922cce7d9f831f333e4999789798be8d867 100644 --- a/docs/en/12-taos-sql/04-stable.md +++ b/docs/en/12-taos-sql/04-stable.md @@ -1,118 +1,159 @@ --- -sidebar_label: STable -title: Super Table +sidebar_label: Supertable +title: Supertable --- -:::note +## Create a Supertable -Keyword `STable`, abbreviated for super table, is supported since version 2.0.15. +```sql +CREATE STABLE [IF NOT EXISTS] stb_name (create_definition [, create_definitionn] ...) TAGS (create_definition [, create_definition] ...) [table_options] + +create_definition: + col_name column_definition + +column_definition: + type_name [COMMENT 'string_value'] +``` -::: +**More explanations** +- Each supertable can have a maximum of 4096 columns, including tags. The minimum number of columns is 3: a timestamp column used as the key, one tag column, and one data column. +- When you create a supertable, you can add comments to columns and tags. +- The TAGS keyword defines the tag columns for the supertable. The following restrictions apply to tag columns: + - A tag column can use the TIMESTAMP data type, but the values in the column must be fixed numbers. Timestamps including formulae, such as "now + 10s", cannot be stored in a tag column. + - The name of a tag column cannot be the same as the name of any other column. + - The name of a tag column cannot be a reserved keyword. + - Each supertable must contain between 1 and 128 tags. The total length of the TAGS keyword cannot exceed 16 KB. +- For more information about table parameters, see Create a Table. -## Create STable +## View a Supertable + +### View All Supertables ``` -CREATE STable [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]); +SHOW STABLES [LIKE tb_name_wildcard]; ``` -The SQL statement of creating a STable is similar to that of creating a table, but a special column set named `TAGS` must be specified with the names and types of the tags. - -:::info +The preceding SQL statement shows all supertables in the current TDengine database, including the name, creation time, number of columns, number of tags, and number of subtabels for each supertable. -1. A tag can be of type timestamp, since version 2.1.3.0, but its value must be fixed and arithmetic operations cannot be performed on it. Prior to version 2.1.3.0, tag types specified in TAGS could not be of type timestamp. -2. The tag names specified in TAGS should NOT be the same as other columns. -3. The tag names specified in TAGS should NOT be the same as any reserved keywords.(Please refer to [keywords](/taos-sql/keywords/) -4. The maximum number of tags specified in TAGS is 128, there must be at least one tag, and the total length of all tag columns should NOT exceed 16KB. - -::: - -## Drop STable +### View the CREATE Statement for a Supertable ``` -DROP STable [IF EXISTS] stb_name; +SHOW CREATE STABLE stb_name; ``` -All the subtables created using the deleted STable will be deleted automatically. +The preceding SQL statement can be used in migration scenarios. It returns the CREATE statement that was used to create the specified supertable. You can then use the returned statement to create an identical supertable on another TDengine database. -## Show All STables +## View the Supertable Schema ``` -SHOW STableS [LIKE tb_name_wildcard]; +DESCRIBE [db_name.]stb_name; ``` -This command can be used to display the information of all STables in the current database, including name, creation time, number of columns, number of tags, and number of tables created using this STable. - -## Show The Create Statement of A STable +## Drop STable ``` -SHOW CREATE STable stb_name; +DROP STABLE [IF EXISTS] [db_name.]stb_name ``` -This command is useful in migrating data from one TDengine cluster to another because it can be used to create the exact same STable in the target database. +Note: Deleting a supertable will delete all subtables created from the supertable, including all data within those subtables. -## Get STable Definition +## Modify a Supertable + +```sql +ALTER STABLE [db_name.]tb_name alter_table_clause + +alter_table_clause: { + alter_table_options + | ADD COLUMN col_name column_type + | DROP COLUMN col_name + | MODIFY COLUMN col_name column_type + | ADD TAG tag_name tag_type + | DROP TAG tag_name + | MODIFY TAG tag_name tag_type + | RENAME TAG old_tag_name new_tag_name +} + +alter_table_options: + alter_table_option ... + +alter_table_option: { + COMMENT 'string_value' +} ``` -DESCRIBE stb_name; -``` -## Change Columns Of STable +**More explanations** + +Modifications to the table schema of a supertable take effect on all subtables within the supertable. You cannot modify the table schema of subtables individually. When you modify the tag schema of a supertable, the modifications automatically take effect on all of its subtables. + +- ADD COLUMN: adds a column to the supertable. +- DROP COLUMN: deletes a column from the supertable. +- MODIFY COLUMN: changes the length of a BINARY or NCHAR column. Note that you can only specify a length greater than the current length. +- ADD TAG: adds a tag to the supertable. +- DROP TAG: deletes a tag from the supertable. When you delete a tag from a supertable, it is automatically deleted from all subtables within the supertable. +- MODIFY TAG: modifies the definition of a tag in the supertable. You can use this keyword to change the length of a BINARY or NCHAR tag column. Note that you can only specify a length greater than the current length. +- RENAME TAG: renames a specified tag in the supertable. When you rename a tag in a supertable, it is automatically renamed in all subtables within the supertable. -### Add A Column +### Add a Column ``` -ALTER STable stb_name ADD COLUMN field_name data_type; +ALTER STABLE stb_name ADD COLUMN col_name column_type; ``` -### Remove A Column +### Delete a Column ``` -ALTER STable stb_name DROP COLUMN field_name; +ALTER STABLE stb_name DROP COLUMN col_name; ``` -### Change Column Length +### Modify the Data Length ``` -ALTER STable stb_name MODIFY COLUMN field_name data_type(length); +ALTER STABLE stb_name MODIFY COLUMN col_name data_type(length); ``` -This command can be used to change (or more specifically, increase) the length of a column of variable length types, like BINARY or NCHAR. - -## Change Tags of A STable +The preceding SQL statement changes the length of a BINARY or NCHAR data column. Note that you can only specify a length greater than the current length. ### Add A Tag ``` -ALTER STable stb_name ADD TAG new_tag_name tag_type; +ALTER STABLE stb_name ADD TAG tag_name tag_type; ``` -This command is used to add a new tag for a STable and specify the tag type. +The preceding SQL statement adds a tag of the specified type to the supertable. A supertable cannot contain more than 128 tags. The total length of all tags in a supertable cannot exceed 16 KB. ### Remove A Tag ``` -ALTER STable stb_name DROP TAG tag_name; +ALTER STABLE stb_name DROP TAG tag_name; ``` -The tag will be removed automatically from all the subtables, created using the super table as template, once a tag is removed from a super table. +The preceding SQL statement deletes a tag from the supertable. When you delete a tag from a supertable, it is automatically deleted from all subtables within the supertable. ### Change A Tag ``` -ALTER STable stb_name CHANGE TAG old_tag_name new_tag_name; +ALTER STABLE stb_name RENAME TAG old_tag_name new_tag_name; ``` -The tag name will be changed automatically for all the subtables, created using the super table as template, once a tag name is changed for a super table. +The preceding SQL statement renames a tag in the supertable. When you rename a tag in a supertable, it is automatically renamed in all subtables within the supertable. ### Change Tag Length ``` -ALTER STable stb_name MODIFY TAG tag_name data_type(length); +ALTER STABLE stb_name MODIFY TAG tag_name data_type(length); ``` -This command can be used to change (or more specifically, increase) the length of a tag of variable length types, like BINARY or NCHAR. +The preceding SQL statement changes the length of a BINARY or NCHAR tag column. Note that you can only specify a length greater than the current length. (Available in 2.1.3.0 and later versions) + +### View a Supertable +You can run projection and aggregate SELECT queries on supertables, and you can filter by tag or column by using the WHERE keyword. + +If you do not include an ORDER BY clause, results are returned by subtable. These results are not ordered. You can include an ORDER BY clause in your query to strictly order the results. + + :::note -Changing tag values can be applied to only subtables. All other tag operations, like add tag, remove tag, however, can be applied to only STable. If a new tag is added for a STable, the tag will be added with NULL value for all its subtables. +All tag operations except for updating the value of a tag must be performed on the supertable and not on individual subtables. If you add a tag to an existing supertable, the tag is automatically added with a null value to all subtables within the supertable. ::: diff --git a/docs/en/12-taos-sql/05-insert.md b/docs/en/12-taos-sql/05-insert.md index 1336cd7238a19190583ea9d268a64df242ffd3c9..e7d56fb3c734affa92c8c71c190b1132cd89e335 100644 --- a/docs/en/12-taos-sql/05-insert.md +++ b/docs/en/12-taos-sql/05-insert.md @@ -1,4 +1,5 @@ --- +sidebar_label: Insert title: Insert --- @@ -17,47 +18,62 @@ INSERT INTO ...]; ``` -## Insert Single or Multiple Rows +**Timestamps** -Single row or multiple rows specified with VALUES can be inserted into a specific table. For example: +1. All data writes must include a timestamp. With regard to timestamps, note the following: -A single row is inserted using the below statement. +2. The precision of a timestamp depends on its format. The precision configured for the database affects only timestamps that are inserted as long integers (UNIX time). Timestamps inserted as date and time strings are not affected. As an example, the timestamp 2021-07-13 16:16:48 is equivalent to 1626164208 in UNIX time. This UNIX time is modified to 1626164208000 for databases with millisecond precision, 1626164208000000 for databases with microsecond precision, and 1626164208000000000 for databases with nanosecond precision. -```sq; +3. If you want to insert multiple rows simultaneously, do not use the NOW function in the timestamp. Using the NOW function in this situation will cause multiple rows to have the same timestamp and prevent them from being stored correctly. This is because the NOW function obtains the current time on the client, and multiple instances of NOW in a single statement will return the same time. + The earliest timestamp that you can use when inserting data is equal to the current time on the server minus the value of the KEEP parameter. The latest timestamp that you can use when inserting data is equal to the current time on the server plus the value of the DURATION parameter. You can configure the KEEP and DURATION parameters when you create a database. The default values are 3650 days for the KEEP parameter and 10 days for the DURATION parameter. + +**Syntax** + +1. The USING clause automatically creates the specified subtable if it does not exist. If it's unknown whether the table already exists, the table can be created automatically while inserting using the SQL statement below. To use this functionality, a STable must be used as template and tag values must be provided. Any tags that you do not specify will be assigned a null value. + +2. You can insert data into specified columns. Any columns in which you do not insert data will be assigned a null value. + +3. The VALUES clause inserts one or more rows of data into a table. + +4. The FILE clause inserts tags or data from a comma-separates values (CSV) file. Do not include headers in your CSV files. + +5. A single INSERT statement can write data to multiple tables. + +6. The INSERT statement is fully parsed before being executed, so that if any element of the statement fails, the entire statement will fail. For example, the following statement will not create a table because the latter part of the statement is invalid: + + ```sql + INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a'); + ``` + +7. However, an INSERT statement that writes data to multiple subtables can succeed for some tables and fail for others. This situation is caused because vnodes perform write operations independently of each other. One vnode failing to write data does not affect the ability of other vnodes to write successfully. + +## Insert a Record + +Single row or multiple rows specified with VALUES can be inserted into a specific table. A single row is inserted using the below statement. + +```sql INSERT INTO d1001 VALUES (NOW, 10.2, 219, 0.32); ``` +## Insert Multiple Records + Double rows are inserted using the below statement. ```sql INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32) (1626164208000, 10.15, 217, 0.33); ``` -:::note - -1. In the second example above, different formats are used in the two rows to be inserted. In the first row, the timestamp format is a date and time string, which is interpreted from the string value only. In the second row, the timestamp format is a long integer, which will be interpreted based on the database time precision. -2. When trying to insert multiple rows in a single statement, only the timestamp of one row can be set as NOW, otherwise there will be duplicate timestamps among the rows and the result may be out of expectation because NOW will be interpreted as the time when the statement is executed. -3. The oldest timestamp that is allowed is subtracting the KEEP parameter from current time. -4. The newest timestamp that is allowed is adding the DAYS parameter to current time. - -::: - -## Insert Into Specific Columns +## Write to a Specified Column -Data can be inserted into specific columns, either single row or multiple row, while other columns will be inserted as NULL value. +Data can be inserted into specific columns, either single row or multiple row, while other columns will be inserted as NULL value. The key (timestamp) cannot be null. For example: -``` +```sql INSERT INTO d1001 (ts, current, phase) VALUES ('2021-07-13 14:06:33.196', 10.27, 0.31); ``` -:::info -If no columns are explicitly specified, all the columns must be provided with values, this is called "all column mode". The insert performance of all column mode is much better than specifying a subset of columns, so it's encouraged to use "all column mode" while providing NULL value explicitly for the columns for which no actual value can be provided. - -::: - ## Insert Into Multiple Tables -One or multiple rows can be inserted into multiple tables in a single SQL statement, with or without specifying specific columns. +One or multiple rows can be inserted into multiple tables in a single SQL statement, with or without specifying specific columns. For example: ```sql INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33) @@ -66,19 +82,19 @@ INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07- ## Automatically Create Table When Inserting -If it's unknown whether the table already exists, the table can be created automatically while inserting using the SQL statement below. To use this functionality, a STable must be used as template and tag values must be provided. +If it's unknown whether the table already exists, the table can be created automatically while inserting using the SQL statement below. To use this functionality, a STable must be used as template and tag values must be provided. For example: ```sql INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32); ``` -It's not necessary to provide values for all tags when creating tables automatically, the tags without values provided will be set to NULL. +It's not necessary to provide values for all tags when creating tables automatically, the tags without values provided will be set to NULL. For example: ```sql INSERT INTO d21001 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:33.196', 10.15, 217, 0.33); ``` -Multiple rows can also be inserted into the same table in a single SQL statement. +Multiple rows can also be inserted into the same table in a single SQL statement. For example: ```sql INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33) @@ -86,10 +102,6 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('202 d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31); ``` -:::info -Prior to version 2.0.20.5, when using `INSERT` to create tables automatically and specifying the columns, the column names must follow the table name immediately. From version 2.0.20.5, the column names can follow the table name immediately, also can be put between `TAGS` and `VALUES`. In the same SQL statement, however, these two ways of specifying column names can't be mixed. -::: - ## Insert Rows From A File Besides using `VALUES` to insert one or multiple rows, the data to be inserted can also be prepared in a CSV file with comma as separator and each field value quoted by single quotes. Table definition is not required in the CSV file. For example, if file "/tmp/csvfile.csv" contains the below data: @@ -107,58 +119,13 @@ INSERT INTO d1001 FILE '/tmp/csvfile.csv'; ## Create Tables Automatically and Insert Rows From File -From version 2.1.5.0, tables can be automatically created using a super table as template when inserting data from a CSV file, like below: - ```sql INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile.csv'; ``` -Multiple tables can be automatically created and inserted in a single SQL statement, like below: +When writing data from a file, you can automatically create the specified subtable if it does not exist. For example: ```sql INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv' d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv'; ``` - -## More About Insert - -For SQL statement like `insert`, a stream parsing strategy is applied. That means before an error is found and the execution is aborted, the part prior to the error point has already been executed. Below is an experiment to help understand the behavior. - -First, a super table is created. - -```sql -CREATE TABLE meters(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT); -``` - -It can be proven that the super table has been created by `SHOW STableS`, but no table exists using `SHOW TABLES`. - -``` -taos> SHOW STableS; - name | created_time | columns | tags | tables | -============================================================================================ - meters | 2020-08-06 17:50:27.831 | 4 | 2 | 0 | -Query OK, 1 row(s) in set (0.001029s) - -taos> SHOW TABLES; -Query OK, 0 row(s) in set (0.000946s) -``` - -Then, try to create table d1001 automatically when inserting data into it. - -```sql -INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('a'); -``` - -The output shows the value to be inserted is invalid. But `SHOW TABLES` proves that the table has been created automatically by the `INSERT` statement. - -``` -DB error: invalid SQL: 'a' (invalid timestamp) (0.039494s) - -taos> SHOW TABLES; - table_name | created_time | columns | STable_name | -====================================================================================================== - d1001 | 2020-08-06 17:52:02.097 | 4 | meters | -Query OK, 1 row(s) in set (0.001091s) -``` - -From the above experiment, we can see that while the value to be inserted is invalid the table is still created. diff --git a/docs/en/12-taos-sql/06-select.md b/docs/en/12-taos-sql/06-select.md index 8a017cf92e40aa4a854dcd531b7df291a9243515..1dd0caed38235d3d10813b2cd74fec6446c5ec24 100644 --- a/docs/en/12-taos-sql/06-select.md +++ b/docs/en/12-taos-sql/06-select.md @@ -1,118 +1,124 @@ --- +sidebar_label: Select title: Select --- ## Syntax -```SQL -SELECT select_expr [, select_expr ...] - FROM {tb_name_list} - [WHERE where_condition] - [SESSION(ts_col, tol_val)] - [STATE_WINDOW(col)] - [INTERVAL(interval_val [, interval_offset]) [SLIDING sliding_val]] - [FILL(fill_mod_and_val)] - [GROUP BY col_list] - [ORDER BY col_list { DESC | ASC }] +```sql +SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE()} + +SELECT [DISTINCT] select_list + from_clause + [WHERE condition] + [PARTITION BY tag_list] + [window_clause] + [group_by_clause] + [order_by_clasue] [SLIMIT limit_val [SOFFSET offset_val]] [LIMIT limit_val [OFFSET offset_val]] - [>> export_file]; -``` + [>> export_file] -## Wildcard +select_list: + select_expr [, select_expr] ... -Wildcard \* can be used to specify all columns. The result includes only data columns for normal tables. +select_expr: { + * + | query_name.* + | [schema_name.] {table_name | view_name} .* + | t_alias.* + | expr [[AS] c_alias] +} -``` -taos> SELECT * FROM d1001; - ts | current | voltage | phase | -====================================================================================== - 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | - 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | - 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | -Query OK, 3 row(s) in set (0.001165s) -``` +from_clause: { + table_reference [, table_reference] ... + | join_clause [, join_clause] ... +} + +table_reference: + table_expr t_alias + +table_expr: { + table_name + | view_name + | ( subquery ) +} -The result includes both data columns and tag columns for super table. +join_clause: + table_reference [INNER] JOIN table_reference ON condition +window_clause: { + SESSION(ts_col, tol_val) + | STATE_WINDOW(col) + | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)] + +changes_option: { + DURATION duration_val + | ROWS rows_val +} + +group_by_clause: + GROUP BY expr [, expr] ... HAVING condition + +order_by_clasue: + ORDER BY order_expr [, order_expr] ... + +order_expr: + {expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST] ``` -taos> SELECT * FROM meters; - ts | current | voltage | phase | location | groupid | -===================================================================================================================================== - 2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | California.LoSangeles | 2 | - 2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | California.LoSangeles | 2 | - 2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | California.LoSangeles | 3 | - 2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | California.LoSangeles | 3 | - 2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | California.SanFrancisco | 3 | - 2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | California.SanFrancisco | 3 | - 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | California.SanFrancisco | 2 | - 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | California.SanFrancisco | 2 | - 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | California.SanFrancisco | 2 | -Query OK, 9 row(s) in set (0.002022s) + +## Lists + +A query can be performed on some or all columns. Data and tag columns can all be included in the SELECT list. + +## Wildcards + +You can use an asterisk (\*) as a wildcard character to indicate all columns. For standard tables, the asterisk indicates only data columns. For supertables and subtables, tag columns are also included. + +```sql +SELECT * FROM d1001; ``` -Wildcard can be used with table name as prefix. Both SQL statements below have the same effect and return all columns. +You can use a table name as a prefix before an asterisk. For example, the following SQL statements both return all columns from the d1001 table: -```SQL +```sql SELECT * FROM d1001; SELECT d1001.* FROM d1001; ``` -In a JOIN query, however, the results are different with or without a table name prefix. \* without table prefix will return all the columns of both tables, but \* with table name as prefix will return only the columns of that table. +However, in a JOIN query, using a table name prefix with an asterisk returns different results. In this case, querying * returns all data in all columns in all tables (not including tags), whereas using a table name prefix returns all data in all columns in the specified table only. -``` -taos> SELECT * FROM d1001, d1003 WHERE d1001.ts=d1003.ts; - ts | current | voltage | phase | ts | current | voltage | phase | -================================================================================================================================== - 2018-10-03 14:38:05.000 | 10.30000| 219 | 0.31000 | 2018-10-03 14:38:05.000 | 10.80000| 223 | 0.29000 | -Query OK, 1 row(s) in set (0.017385s) +```sql +SELECT * FROM d1001, d1003 WHERE d1001.ts=d1003.ts; +SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts; ``` -``` -taos> SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts; - ts | current | voltage | phase | -====================================================================================== - 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | -Query OK, 1 row(s) in set (0.020443s) -``` +The first of the preceding SQL statements returns all columns from the d1001 and d1003 tables, but the second of the preceding SQL statements returns all columns from the d1001 table only. -Wildcard \* can be used with some functions, but the result may be different depending on the function being used. For example, `count(*)` returns only one column, i.e. the number of rows; `first`, `last` and `last_row` return all columns of the selected row. +With regard to the other SQL functions that support wildcards, the differences are as follows: +`count(*)` only returns one column. `first`, `last`, and `last_row` return all columns. -``` -taos> SELECT COUNT(*) FROM d1001; - count(*) | -======================== - 3 | -Query OK, 1 row(s) in set (0.001035s) -``` +### Tag Columns -``` -taos> SELECT FIRST(*) FROM d1001; - first(ts) | first(current) | first(voltage) | first(phase) | -========================================================================================= - 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | -Query OK, 1 row(s) in set (0.000849s) +You can query tag columns in supertables and subtables and receive results in the same way as querying data columns. + +```sql +SELECT location, groupid, current FROM d1001 LIMIT 2; ``` -## Tags +### Distinct Values -Starting from version 2.0.14, tag columns can be selected together with data columns when querying sub tables. Please note however, that, wildcard \* cannot be used to represent any tag column. This means that tag columns must be specified explicitly like the example below. +The DISTINCT keyword returns only values that are different over one or more columns. You can use the DISTINCT keyword with tag columns and data columns. -``` -taos> SELECT location, groupid, current FROM d1001 LIMIT 2; - location | groupid | current | -====================================================================== - California.SanFrancisco | 2 | 10.30000 | - California.SanFrancisco | 2 | 12.60000 | -Query OK, 2 row(s) in set (0.003112s) -``` +The following SQL statement returns distinct values from a tag column: -## Get distinct values +```sql +SELECT DISTINCT tag_name [, tag_name ...] FROM stb_name; +``` -`DISTINCT` keyword can be used to get all the unique values of tag columns from a super table. It can also be used to get all the unique values of data columns from a table or subtable. +The following SQL statement returns distinct values from a data column: ```sql -SELECT DISTINCT tag_name [, tag_name ...] FROM stb_name; SELECT DISTINCT col_name [, col_name ...] FROM tb_name; ``` @@ -124,231 +130,188 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name; ::: -## Columns Names of Result Set +### Column Names -When using `SELECT`, the column names in the result set will be the same as that in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set. For example +When using `SELECT`, the column names in the result set will be the same as that in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set. For example: -``` +```sql taos> SELECT ts, ts AS primary_key_ts FROM d1001; - ts | primary_key_ts | -==================================================== - 2018-10-03 14:38:05.000 | 2018-10-03 14:38:05.000 | - 2018-10-03 14:38:15.000 | 2018-10-03 14:38:15.000 | - 2018-10-03 14:38:16.800 | 2018-10-03 14:38:16.800 | -Query OK, 3 row(s) in set (0.001191s) ``` `AS` can't be used together with `first(*)`, `last(*)`, or `last_row(*)`. -## Implicit Columns +### Pseudocolumns + +**TBNAME** +The TBNAME pseudocolumn in a supertable contains the names of subtables within the supertable. -`Select_exprs` can be column names of a table, or function expression or arithmetic expression on columns. The maximum number of allowed column names and expressions is 256. Timestamp and the corresponding tag names will be returned in the result set if `interval` or `group by tags` are used, and timestamp will always be the first column in the result set. +The following SQL statement returns all unique subtable names and locations within the meters supertable: -## Table List +```mysql +SELECT DISTINCT TBNAME, location FROM meters; +``` -`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query. If no database is specified as current database in use, table names must be preceded with database name, like `power.d1001`. +Use the `INS_TAGS` system table in `INFORMATION_SCHEMA` to query the information for subtables in a supertable. For example, the following statement returns the name and tag values for each subtable in the `meters` supertable. -```SQL -SELECT * FROM power.d1001; +```mysql +SELECT table_name, tag_name, tag_type, tag_value FROM information_schema.ins_tags WHERE stable_name='meters'; ``` -has same effect as +The following SQL statement returns the number of subtables within the meters supertable. -```SQL -USE power; -SELECT * FROM d1001; +```mysql +SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters); ``` -## Special Query +In the preceding two statements, only tags can be used as filtering conditions in the WHERE clause. For example: -Some special query functions can be invoked without `FROM` sub-clause. For example, the statement below can be used to get the current database in use. +**\_QSTART and \_QEND** -``` -taos> SELECT DATABASE(); - database() | -================================= - power | -Query OK, 1 row(s) in set (0.000079s) -``` +The \_QSTART and \_QEND pseudocolumns contain the beginning and end of the time range of a query. If the WHERE clause in a statement does not contain valid timestamps, the time range is equal to [-2^63, 2^63 - 1]. -If no database is specified upon logging in and no database is specified with `USE` after login, NULL will be returned by `select database()`. +The \_QSTART and \_QEND pseudocolumns cannot be used in a WHERE clause. -``` -taos> SELECT DATABASE(); - database() | -================================= - NULL | -Query OK, 1 row(s) in set (0.000184s) -``` +**\_WSTART, \_WEND, and \_WDURATION** -The statement below can be used to get the version of client or server. +The \_WSTART, \_WEND, and \_WDURATION pseudocolumns indicate the beginning, end, and duration of a window. -``` -taos> SELECT CLIENT_VERSION(); - client_version() | -=================== - 2.0.0.0 | -Query OK, 1 row(s) in set (0.000070s) +These pseudocolumns can be used only in time window-based aggregations and must occur after the aggregation clause. -taos> SELECT SERVER_VERSION(); - server_version() | -=================== - 2.0.0.0 | -Query OK, 1 row(s) in set (0.000077s) -``` +**\_c0 and \_ROWTS** -The statement below is used to check the server status. An integer, like `1`, is returned if the server status is OK, otherwise an error code is returned. This is compatible with the status check for TDengine from connection pool or 3rd party tools, and can avoid the problem of losing the connection from a connection pool when using the wrong heartbeat checking SQL statement. +In TDengine, the first column of all tables must be a timestamp. This column is the primary key of the table. The \_c0 and \_ROWTS pseudocolumns both represent the values of this column. These pseudocolumns enable greater flexibility and standardization. For example, you can use functions such as MAX and MIN with these pseudocolumns. +```sql +select _rowts, max(current) from meters; ``` -taos> SELECT SERVER_STATUS(); - server_status() | -================== - 1 | -Query OK, 1 row(s) in set (0.000074s) -taos> SELECT SERVER_STATUS() AS status; - status | -============== - 1 | -Query OK, 1 row(s) in set (0.000081s) -``` +## Query Objects -## \_block_dist +`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query. +If no database is specified as current database in use, table names must be preceded with database name, for example, `power.d1001`. -**Description**: Get the data block distribution of a table or STable. +You can perform INNER JOIN statements based on the primary key. The following conditions apply: -```SQL title="Syntax" -SELECT _block_dist() FROM { tb_name | stb_name } -``` +1. You can use FROM table list or an explicit JOIN clause. +2. For standard tables and subtables, you must specify an ON condition and the condition must be equivalent to the primary key. +3. For supertables, the ON condition must be equivalent to the primary key. In addition, the tag columns of the tables on which the INNER JOIN is performed must have a one-to-one relationship. You cannot specify an OR condition. +4. The tables that are included in a JOIN clause must be of the same type (supertable, standard table, or subtable). +5. You can include subqueries before and after the JOIN keyword. +6. You cannot include more than ten tables in a JOIN clause. +7. You cannot include a FILL clause and a JOIN clause in the same statement. -**Restrictions**:No argument is allowed, where clause is not allowed +## GROUP BY -**Sub Query**:Sub query or nested query are not supported +If you use a GROUP BY clause, the SELECT list can only include the following items: -**Return value**: A string which includes the data block distribution of the specified table or STable, i.e. the histogram of rows stored in the data blocks of the table or STable. +1. Constants +2. Aggregate functions +3. Expressions that are consistent with the expression following the GROUP BY clause +4. Expressions that include the preceding expression -```text title="Result" -summary: -5th=[392], 10th=[392], 20th=[392], 30th=[392], 40th=[792], 50th=[792] 60th=[792], 70th=[792], 80th=[792], 90th=[792], 95th=[792], 99th=[792] Min=[392(Rows)] Max=[800(Rows)] Avg=[666(Rows)] Stddev=[2.17] Rows=[2000], Blocks=[3], Size=[5.440(Kb)] Comp=[0.23] RowsInMem=[0] SeekHeaderTime=[1(us)] -``` +The GROUP BY clause groups each row of data by the value of the expression following the clause and returns a combined result for each group. -**More explanation about above example**: +The expressions in a GROUP BY clause can include any column in any table or view. It is not necessary that the expressions appear in the SELECT list. -- Histogram about the rows stored in the data blocks of the table or STable: the value of rows for 5%, 10%, 20%, 30%, 40%, 50%, 60%, 70%, 80%, 90%, 95%, and 99% -- Minimum number of rows stored in a data block, i.e. Min=[392(Rows)] -- Maximum number of rows stored in a data block, i.e. Max=[800(Rows)] -- Average number of rows stored in a data block, i.e. Avg=[666(Rows)] -- stddev of number of rows, i.e. Stddev=[2.17] -- Total number of rows, i.e. Rows[2000] -- Total number of data blocks, i.e. Blocks=[3] -- Total disk size consumed, i.e. Size=[5.440(Kb)] -- Compression ratio, which means the compressed size divided by original size, i.e. Comp=[0.23] -- Total number of rows in memory, i.e. RowsInMem=[0], which means no rows in memory -- The time spent on reading head file (to retrieve data block information), i.e. SeekHeaderTime=[1(us)], which means 1 microsecond. +The GROUP BY clause does not guarantee that the results are ordered. If you want to ensure that grouped data is ordered, use the ORDER BY clause. -## Special Keywords in TAOS SQL -- `TBNAME`: it is treated as a special tag when selecting on a super table, representing the name of subtables in that super table. -- `_c0`: represents the first column of a table or super table. +## PARTITION BY -## Tips +The PARTITION BY clause is a TDengine-specific extension to standard SQL. This clause partitions data based on the part_list and performs computations per partition. -To get all the subtables and corresponding tag values from a super table: +For more information, see TDengine Extensions. -```SQL -SELECT TBNAME, location FROM meters; -``` +## ORDER BY + +The ORDER BY keyword orders query results. If you do not include an ORDER BY clause in a query, the order of the results can be inconsistent. + +You can specify integers after ORDER BY to indicate the order in which you want the items in the SELECT list to be displayed. For example, 1 indicates the first item in the select list. + +You can specify ASC for ascending order or DESC for descending order. + +You can also use the NULLS keyword to specify the position of null values. Ascending order uses NULLS LAST by default. Descending order uses NULLS FIRST by default. + +## LIMIT + +The LIMIT keyword controls the number of results that are displayed. You can also use the OFFSET keyword to specify the result to display first. `LIMIT` and `OFFSET` are executed after `ORDER BY` in the query execution. You can include an offset in a LIMIT clause. For example, LIMIT 5 OFFSET 2 can also be written LIMIT 2, 5. Both of these clauses display the third through the seventh results. + +In a statement that includes a PARTITON BY clause, the LIMIT keyword is performed on each partition, not on the entire set of results. + +## SLIMIT + +The SLIMIT keyword is used with a PARTITION BY clause to control the number of partitions that are displayed. You can include an offset in a SLIMIT clause. For example, SLIMIT 5 OFFSET 2 can also be written LIMIT 2, 5. Both of these clauses display the third through the seventh partitions. + +Note: If you include an ORDER BY clause, only one partition can be displayed. + +## Special Query -To get the number of sub tables in a super table: +Some special query functions can be invoked without `FROM` sub-clause. -```SQL -SELECT COUNT(TBNAME) FROM meters; +## Obtain Current Database + +The following SQL statement returns the current database. If a database has not been specified on login or with the `USE` command, a null value is returned. + +```sql +SELECT DATABASE(); ``` -Only filter on `TAGS` are allowed in the `where` clause for above two query statements. For example: +### Obtain Current Version +```sql +SELECT CLIENT_VERSION(); +SELECT SERVER_VERSION(); ``` -taos> SELECT TBNAME, location FROM meters; - tbname | location | -================================================================== - d1004 | California.LosAngeles | - d1003 | California.LosAngeles | - d1002 | California.SanFrancisco | - d1001 | California.SanFrancisco | -Query OK, 4 row(s) in set (0.000881s) -taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; - count(tbname) | -======================== - 2 | -Query OK, 1 row(s) in set (0.001091s) +## Obtain Server Status + +The following SQL statement returns the status of the TDengine server. An integer indicates that the server is running normally. An error code indicates that an error has occurred. This statement can also detect whether a connection pool or third-party tool is connected to TDengine properly. By using this statement, you can ensure that connections in a pool are not lost due to an incorrect heartbeat detection statement. + +```sql +SELECT SERVER_STATUS(); ``` -- Wildcard \* can be used to get all columns, or specific column names can be specified. Arithmetic operation can be performed on columns of numerical types, columns can be renamed in the result set. -- Arithmetic operation on columns can't be used in where clause. For example, `where a*2>6;` is not allowed but `where a>6/2;` can be used instead for the same purpose. -- Arithmetic operation on columns can't be used as the objectives of select statement. For example, `select min(2*a) from t;` is not allowed but `select 2*min(a) from t;` can be used instead. -- Logical operation can be used in `WHERE` clause to filter numeric values, wildcard can be used to filter string values. -- Result sets are arranged in ascending order of the first column, i.e. timestamp, but it can be controlled to output as descending order of timestamp. If `order by` is used on other columns, the result may not be as expected. By the way, \_c0 is used to represent the first column, i.e. timestamp. -- `LIMIT` parameter is used to control the number of rows to output. `OFFSET` parameter is used to specify from which row to output. `LIMIT` and `OFFSET` are executed after `ORDER BY` in the query execution. A simple tip is that `LIMIT 5 OFFSET 2` can be abbreviated as `LIMIT 2, 5`. -- What is controlled by `LIMIT` is the number of rows in each group when `GROUP BY` is used. -- `SLIMIT` parameter is used to control the number of groups when `GROUP BY` is used. Similar to `LIMIT`, `SLIMIT 5 OFFSET 2` can be abbreviated as `SLIMIT 2, 5`. -- ">>" can be used to output the result set of `select` statement to the specified file. +### Obtain Current Time -## Where +```sql +SELECT NOW(); +``` -Logical operations in below table can be used in the `where` clause to filter the resulting rows. +### Obtain Current Date -| **Operation** | **Note** | **Applicable Data Types** | -| ------------- | ------------------------ | ----------------------------------------- | -| > | larger than | all types except bool | -| < | smaller than | all types except bool | -| >= | larger than or equal to | all types except bool | -| <= | smaller than or equal to | all types except bool | -| = | equal to | all types | -| <\> | not equal to | all types | -| is [not] null | is null or is not null | all types | -| between and | within a certain range | all types except bool | -| in | match any value in a set | all types except first column `timestamp` | -| like | match a wildcard string | **`binary`** **`nchar`** | -| match/nmatch | filter regex | **`binary`** **`nchar`** | +```sql +SELECT TODAY(); +``` -**Explanations**: +### Obtain Current Time Zone -- Operator `<\>` is equal to `!=`, please note that this operator can't be used on the first column of any table, i.e.timestamp column. -- Operator `like` is used together with wildcards to match strings - - '%' matches 0 or any number of characters, '\_' matches any single ASCII character. - - `\_` is used to match the \_ in the string. - - The maximum length of wildcard string is 100 bytes from version 2.1.6.1 (before that the maximum length is 20 bytes). `maxWildCardsLength` in `taos.cfg` can be used to control this threshold. A very long wildcard string may slowdown the execution performance of `LIKE` operator. -- `AND` keyword can be used to filter multiple columns simultaneously. AND/OR operation can be performed on single or multiple columns from version 2.3.0.0. However, before 2.3.0.0 `OR` can't be used on multiple columns. -- For timestamp column, only one condition can be used; for other columns or tags, `OR` keyword can be used to combine multiple logical operators. For example, `((value > 20 AND value < 30) OR (value < 12))`. - - From version 2.3.0.0, multiple conditions can be used on timestamp column, but the result set can only contain single time range. -- From version 2.0.17.0, operator `BETWEEN AND` can be used in where clause, for example `WHERE col2 BETWEEN 1.5 AND 3.25` means the filter condition is equal to "1.5 ≤ col2 ≤ 3.25". -- From version 2.1.4.0, operator `IN` can be used in the where clause. For example, `WHERE city IN ('California.SanFrancisco', 'California.SanDiego')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating point precision errors. Only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`. -- From version 2.3.0.0, regular expression is supported in the where clause with keyword `match` or `nmatch`. The regular expression is case insensitive. +```sql +SELECT TIMEZONE(); +``` ## Regular Expression ### Syntax -```SQL +```txt WHERE (column|tbname) **match/MATCH/nmatch/NMATCH** _regex_ ``` ### Specification -The regular expression being used must be compliant with POSIX specification, please refer to [Regular Expressions](https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap09.html). +TDengine supports POSIX regular expression syntax. For more information, see [Regular Expressions](https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap09.html). ### Restrictions -Regular expression can be used against only table names, i.e. `tbname`, and tags of binary/nchar types, but can't be used against data columns. +Regular expression filtering is supported only on table names (TBNAME), BINARY tags, and NCHAR tags. Regular expression filtering cannot be performed on data columns. -The maximum length of regular expression string is 128 bytes. Configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression. It's a configuration parameter on the client side, and will take effect after restarting the client. +A regular expression string cannot exceed 128 bytes. You can configure this value by modifying the maxRegexStringLen parameter on the TDengine Client. The modified value takes effect when the client is restarted. ## JOIN -From version 2.2.0.0, inner join is fully supported in TDengine. More specifically, the inner join between table and table, between STable and STable, and between sub query and sub query are supported. +TDengine supports natural joins between supertables, between standard tables, and between subqueries. The difference between natural joins and inner joins is that natural joins require that the fields being joined in the supertables or standard tables must have the same name. Data or tag columns must be joined with the equivalent column in another table. -Only primary key, i.e. timestamp, can be used in the join operation between table and table. For example: +For standard tables, only the timestamp (primary key) can be used in join operations. For example: ```sql SELECT * @@ -356,25 +319,26 @@ FROM temp_tb_1 t1, pressure_tb_1 t2 WHERE t1.ts = t2.ts ``` -In the join operation between STable and STable, besides the primary key, i.e. timestamp, tags can also be used. For example: +For supertables, tags as well as timestamps can be used in join operations. For example: ```sql SELECT * -FROM temp_STable t1, temp_STable t2 +FROM temp_stable t1, temp_stable t2 WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0; ``` -Similarly, join operations can be performed on the result set of multiple sub queries. +Similarly, join operations can be performed on the result sets of multiple subqueries. :::note -Restrictions on join operation: -- The number of tables or STables in a single join operation can't exceed 10. -- `FILL` is not allowed in the query statement that includes JOIN operation. -- Arithmetic operation is not allowed on the result set of join operation. -- `GROUP BY` is not allowed on a part of tables that participate in join operation. -- `OR` can't be used in the conditions for join operation -- join operation can't be performed on data columns, i.e. can only be performed on tags or primary key, i.e. timestamp +The following restriction apply to JOIN statements: + +- The number of tables or supertables in a single join operation cannot exceed 10. +- `FILL` cannot be used in a JOIN statement. +- Arithmetic operations cannot be performed on the result sets of join operation. +- `GROUP BY` is not allowed on a segment of the tables that participate in a join operation. +- `OR` cannot be used in the conditions for join operation +- Join operation can be performed only on tags or timestamps. You cannot perform a join operation on data columns. ::: @@ -384,7 +348,7 @@ Nested query is also called sub query. This means that in a single SQL statement From 2.2.0.0, unassociated sub query can be used in the `FROM` clause. Unassociated means the sub query doesn't use the parameters in the parent query. More specifically, in the `tb_name_list` of `SELECT` statement, an independent SELECT statement can be used. So a complete nested query looks like: -```SQL +``` SELECT ... FROM (SELECT ... FROM ...) ...; ``` @@ -408,42 +372,42 @@ SELECT ... FROM (SELECT ... FROM ...) ...; ## UNION ALL -```SQL title=Syntax +```txt title=Syntax SELECT ... UNION ALL SELECT ... [UNION ALL SELECT ...] ``` -`UNION ALL` operator can be used to combine the result set from multiple select statements as long as the result set of these select statements have exactly the same columns. `UNION ALL` doesn't remove redundant rows from multiple result sets. In a single SQL statement, at most 100 `UNION ALL` can be supported. +TDengine supports the `UNION ALL` operation. `UNION ALL` operator can be used to combine the result set from multiple select statements as long as the result set of these select statements have exactly the same columns. `UNION ALL` doesn't remove redundant rows from multiple result sets. In a single SQL statement, at most 100 `UNION ALL` can be supported. ### Examples table `tb1` is created using below SQL statement: -```SQL +``` CREATE TABLE tb1 (ts TIMESTAMP, col1 INT, col2 FLOAT, col3 BINARY(50)); ``` The rows in the past one hour in `tb1` can be selected using below SQL statement: -```SQL +``` SELECT * FROM tb1 WHERE ts >= NOW - 1h; ``` The rows between 2018-06-01 08:00:00.000 and 2018-06-02 08:00:00.000 and col3 ends with 'nny' can be selected in the descending order of timestamp using below SQL statement: -```SQL +``` SELECT * FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND ts <= '2018-06-02 08:00:00.000' AND col3 LIKE '%nny' ORDER BY ts DESC; ``` The sum of col1 and col2 for rows later than 2018-06-01 08:00:00.000 and whose col2 is bigger than 1.2 can be selected and renamed as "complex", while only 10 rows are output from the 5th row, by below SQL statement: -```SQL +``` SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND col2 > 1.2 LIMIT 10 OFFSET 5; ``` The rows in the past 10 minutes and whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutput.csv` with below SQL statement: -```SQL +``` SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutput.csv; ``` diff --git a/docs/en/12-taos-sql/08-delete-data.mdx b/docs/en/12-taos-sql/08-delete-data.mdx index 86443dca53b08f5f5c489d40f4a2ea8afc8081fb..999c467ad05b6d3e349e322141acfb02a49de5ff 100644 --- a/docs/en/12-taos-sql/08-delete-data.mdx +++ b/docs/en/12-taos-sql/08-delete-data.mdx @@ -4,8 +4,7 @@ description: "Delete data from table or Stable" title: Delete Data --- -TDengine provides the functionality of deleting data from a table or STable according to specified time range, it can be used to cleanup abnormal data generated due to device failure. Please be noted that this functionality is only available in Enterprise version, please refer to [TDengine Enterprise Edition](https://tdengine.com/products#enterprise-edition-link) - +TDengine provides the functionality of deleting data from a table or STable according to specified time range, it can be used to cleanup abnormal data generated due to device failure. **Syntax:** @@ -16,21 +15,21 @@ DELETE FROM [ db_name. ] tb_name [WHERE condition]; **Description:** Delete data from a table or STable **Parameters:** - + - `db_name`: Optional parameter, specifies the database in which the table exists; if not specified, the current database will be used. - `tb_name`: Mandatory parameter, specifies the table name from which data will be deleted, it can be normal table, subtable or STable. -- `condition`: Optional parameter, specifies the data filter condition. If no condition is specified all data will be deleted, so please be cautions to delete data without any condition. The condition used here is only applicable to the first column, i.e. the timestamp column. If the table is a STable, the condition is also applicable to tag columns. +- `condition`: Optional parameter, specifies the data filter condition. If no condition is specified all data will be deleted, so please be cautions to delete data without any condition. The condition used here is only applicable to the first column, i.e. the timestamp column. **More Explanations:** - -The data can't be recovered once deleted, so please be cautious to use the functionality of deleting data. It's better to firstly make sure the data to be deleted using `select` then execute `delete`. + +The data can't be recovered once deleted, so please be cautious to use the functionality of deleting data. It's better to firstly make sure the data to be deleted using `select` then execute `delete`. **Example:** - -`meters` is a STable, in which `groupid` is a tag column of int type. Now we want to delete the data older than 2021-10-01 10:40:00.100 and `groupid` is 1. The SQL for this purpose is like below: + +`meters` is a STable, in which `groupid` is a tag column of int type. Now we want to delete the data older than 2021-10-01 10:40:00.100. You can perform this action by running the following SQL statement: ```sql -delete from meters where ts < '2021-10-01 10:40:00.100' and groupid=1 ; +delete from meters where ts < '2021-10-01 10:40:00.100' ; ``` The output is: diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md index 6375422b07a2ee7d5c9b6a0074060a39888da773..d6905c84a11ecf1f827ad239076ad5d4c93f8e3e 100644 --- a/docs/en/12-taos-sql/10-function.md +++ b/docs/en/12-taos-sql/10-function.md @@ -1,13 +1,14 @@ --- +sidebar_label: Functions title: Functions toc_max_heading_level: 4 --- -## Single-Row Functions +## Single Row Functions -Single-Row functions return a result row for each row in the query result. +Single row functions return a result for each row. -### Numeric Functions +### Mathematical Functions #### ABS @@ -17,16 +18,15 @@ SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause] **Description**: The absolute value of a specific field. -**Return value type**: Same as input type. +**Return value type**: Same as the field being used -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables -**More explanations**: -- Cannot be used with aggregate functions. +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. #### ACOS @@ -34,18 +34,17 @@ SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause] SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The anti-cosine of a specific field. +**Description**: The arc cosine of a specific field. -**Return value type**: DOUBLE. +**Return value type**: Double -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables -**More explanations**: -- Cannot be used with aggregate functions. +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. #### ASIN @@ -53,18 +52,18 @@ SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause] SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The anti-sine of a specific field. +**Description**: The arc sine of a specific field. -**Return value type**: DOUBLE. +**Return value type**: Double -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables + +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. -**More explanations**: -- Cannot be used with aggregate functions. #### ATAN @@ -72,37 +71,36 @@ SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause] SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: anti-tangent of a specific field. +**Description**: The arc tangent of a specific field. -**Return value type**: DOUBLE. +**Return value type**: Double -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables + +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. -**More explanations**: -- Cannot be used with aggregate functions. #### CEIL -``` +```sql SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The rounded up value of a specific field. +**Description**: The rounded up value of a specific field -**Return value type**: Same as input type. +**Return value type**: Same as the field being used -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable +**Applicable table types**: standard tables and supertables -**Applicable nested query**: Inner query and outer query. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**More explanations**: -- Can't be used with aggregate functions. +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. #### COS @@ -112,45 +110,43 @@ SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause] **Description**: The cosine of a specific field. -**Return value type**: DOUBLE. +**Return value type**: Double -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables -**More explanations**: -- Can't be used with aggregate functions. +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. #### FLOOR -``` +```sql SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The rounded down value of a specific field. - -**More explanations**: Refer to `CEIL` function for usage restrictions. +**Description**: The rounded down value of a specific field + **More explanations**: The restrictions are same as those of the `CEIL` function. #### LOG ```sql -SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause] +SELECT LOG(field_name[, base]) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The logarithm of a specific field with `base` as the radix. If `base` parameter is ignored, natural logarithm of the field is returned. +**Description**: The logarithm of a specific field with `base` as the radix. If you do not enter a base, the natural logarithm of the field is returned. -**Return value type**: DOUBLE. +**Return value type**: Double -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables + +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. -**More explanations**: -- Can't be used with aggregate functions #### POW @@ -158,28 +154,28 @@ SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause] SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The power of a specific field with `power` as the index. +**Description**: The power of a specific field with `power` as the exponent. -**Return value type**: DOUBLE. +**Return value type**: Double -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables + +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. -**More explanations**: -- Can't be used with aggregate functions. #### ROUND -``` +```sql SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The rounded value of a specific field. +**Description**: The rounded value of a specific field. + **More explanations**: The restrictions are same as those of the `CEIL` function. -**More explanations**: Refer to `CEIL` function for usage restrictions. #### SIN @@ -189,18 +185,15 @@ SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause] **Description**: The sine of a specific field. -**Description**: The anti-cosine of a specific field. - -**Return value type**: DOUBLE. +**Return value type**: Double -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables -**More explanations**: -- Can't be used with aggregate functions. +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. #### SQRT @@ -210,16 +203,15 @@ SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause] **Description**: The square root of a specific field. -**Return value type**: DOUBLE. +**Return value type**: Double -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables -**More explanations**: -- Can't be used with aggregate functions. +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. #### TAN @@ -229,39 +221,35 @@ SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause] **Description**: The tangent of a specific field. -**Description**: The anti-cosine of a specific field. - -**Return value type**: DOUBLE. +**Return value type**: Double -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables -**More explanations**: -- Can't be used with aggregate functions. +**Usage**: This function can only be used on data columns. It can be used with selection and projection functions but not with aggregation functions. -### String Functions +### Concatenation Functions -String functiosn take strings as input and output numbers or strings. +Concatenation functions take strings as input and produce string or numeric values as output. #### CHAR_LENGTH -``` +```sql SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The mumber of characters of a string. +**Description**: The length in number of characters of a string -**Return value type**: INTEGER. +**Return value type**: Bigint -**Applicable data types**: VARCHAR, NCHAR. +**Applicable data types**: VARCHAR and NCHAR -**Applicable table types**: table, STable. - -**Applicable nested query**: Inner query and Outer query. +**Nested query**: It can be used in both the outer query and inner query in a nested query. +**Applicable table types**: standard tables and supertables #### CONCAT @@ -269,144 +257,139 @@ SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause] SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The concatenation result of two or more strings. +**Description**: The concatenation result of two or more strings -**Return value type**: If all input strings are VARCHAR type, the result is VARCHAR type too. If any one of input strings is NCHAR type, then the result is NCHAR. If input strings contain NULL value, the result is NULL. +**Return value type**: If the concatenated strings are VARCHARs, the result is a VARCHAR. If the concatenated strings are NCHARs, the result is an NCHAR. If an input value is null, the result is null. -**Applicable data types**: VARCHAR, NCHAR. At least 2 input strings are required, and at most 8 input strings are allowed. +**Applicable data types**: VARCHAR and NCHAR You can concatenate between 2 and 8 strings. -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables #### CONCAT_WS -``` +```sql SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The concatenation result of two or more strings with separator. +**Description**: The concatenation result of two or more strings with separator -**Return value type**: If all input strings are VARCHAR type, the result is VARCHAR type too. If any one of input strings is NCHAR type, then the result is NCHAR. If input strings contain NULL value, the result is NULL. +**Return value type**: If the concatenated strings are VARCHARs, the result is a VARCHAR. If the concatenated strings are NCHARs, the result is an NCHAR. If an input value is null, the result is null. -**Applicable data types**: VARCHAR, NCHAR. At least 3 input strings are required, and at most 9 input strings are allowed. +**Applicable data types**: VARCHAR and NCHAR You can concatenate between 3 and 9 strings. -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables #### LENGTH -``` +```sql SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The length in bytes of a string. +**Description**: The length in bytes of a string -**Return value type**: INTEGER. +**Return value type**: Bigint -**Applicable data types**: VARCHAR, NCHAR. +**Applicable data types**: VARCHAR and NCHAR fields or columns -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables #### LOWER -``` +```sql SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: Convert the input string to lower case. +**Description**: Convert the input string to lower case -**Return value type**: Same as input type. +**Return value type**: Same as input -**Applicable data types**: VARCHAR, NCHAR. +**Applicable data types**: VARCHAR and NCHAR -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables #### LTRIM -``` +```sql SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: Remove the left leading blanks of a string. +**Description**: Remove the left leading blanks of a string -**Return value type**: Same as input type. +**Return value type**: Same as input -**Applicable data types**: VARCHAR, NCHAR. +**Applicable data types**: VARCHAR and NCHAR -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables #### RTRIM -``` -SELECT RTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause] +```sql +SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: Remove the right tailing blanks of a string. +**Description**: Remove the right tailing blanks of a string -**Return value type**: Same as input type. +**Return value type**: Same as input -**Applicable data types**: VARCHAR, NCHAR. +**Applicable data types**: VARCHAR and NCHAR -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables #### SUBSTR -``` +```sql SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The sub-string starting from `pos` with length of `len` from the original string `str`. - -**Return value type**: Same as input type. +**Description**: The sub-string starting from `pos` with length of `len` from the original string `str` - If `len` is not specified, it means from `pos` to the end. -**Applicable data types**: VARCHAR, NCHAR. +**Return value type**: Same as input -**Applicable table types**: table, STable. +**Applicable data types**: VARCHAR and NCHAR Parameter `pos` can be an positive or negative integer; If it's positive, the starting position will be counted from the beginning of the string; if it's negative, the starting position will be counted from the end of the string. -**Applicable nested query**: Inner query and Outer query. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**More explanations**: +**Applicable table types**: table, STable -- If the input is NULL, the output is NULL -- Parameter `pos` can be an positive or negative integer; If it's positive, the starting position will be counted from the beginning of the string; if it's negative, the starting position will be counted from the end of the string. -- If `len` is not specified, it means from `pos` to the end of string. #### UPPER -``` +```sql SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: Convert the input string to upper case. +**Description**: Convert the input string to upper case -**Return value type**: Same as input type. +**Return value type**: Same as input -**Applicable data types**: VARCHAR, NCHAR. +**Applicable data types**: VARCHAR and NCHAR -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: table, STable ### Conversion Functions -Conversion functions convert from one data type to another. +Conversion functions change the data type of a value. #### CAST @@ -414,19 +397,23 @@ Conversion functions convert from one data type to another. SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: Used for type casting. Convert `expression` to the type specified by `type_name`. +**Description**: Convert the input data `expression` into the type specified by `type_name`. This function can be used only in SELECT statements. -**Return value type**: The type specified by parameter `type_name`. +**Return value type**: The type specified by parameter `type_name` -**Applicable data types**: `expression` can be any data type except for JSON. +**Applicable data types**: All data types except JSON + +**Nested query**: It can be used in both the outer query and inner query in a nested query. + +**Applicable table types**: standard tables and supertables **More explanations**: -- Error will be reported for unsupported type casting. +- Error will be reported for unsupported type casting - Some values of some supported data types may not be casted, below are known issues: - 1)When casting VARCHAR/NCHAR to BIGINT/BIGINT UNSIGNED, some characters may be treated as illegal, for example "a" may be converted to 0. - 2)When casting to numeric type, if converted result is out of range the destination data type can hold, overflow may occur and casting behavior is undefined. - 3) When casting to VARCHAR/NCHAR type, if converted string length exceeds the length specified in `type_name`, the result will be truncated. (e.g. CAST("abcd" as BINARY(2)) will return string "ab"). + 1. Some strings cannot be converted to numeric values. For example, the string `a` may be converted to `0`. However, this does not produce an error. + 2. If a converted numeric value is larger than the maximum size for the specified type, an overflow will occur. However, this does not produce an error. + 3. If a converted string value is larger than the maximum size for the specified type, the output value will be truncated. However, this does not produce an error. #### TO_ISO8601 @@ -434,18 +421,22 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause] SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The ISO8601 date/time format converted from a UNIX timestamp, with timezone attached. `timezone` parameter allows attaching any customized timezone string to the output format. If `timezone` parameter is not specified, the timezone information of client side system will be attached. +**Description**: The ISO8601 date/time format converted from a UNIX timestamp, plus the timezone. You can specify any time zone with the timezone parameter. If you do not enter this parameter, the time zone on the client is used. -**Return value type**: VARCHAR. +**Return value type**: VARCHAR -**Applicable data types**: INTEGER, TIMESTAMP. +**Applicable data types**: Integers and timestamps -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. + +**Applicable table types**: standard tables and supertables **More explanations**: -- If the input is INTEGER represents UNIX timestamp, the precision of the returned value is determined by the digits of the input integer. -- If the input is of TIMESTAMP type, The precision of the returned value is same as the precision set for the current database in use. +- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。 For example, TO_ISO8601(1, "+00:00"). +- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp +- If the input is a column of TIMESTAMP type, the precision of the returned value is same as the precision set for the current data base in use + #### TO_JSON @@ -453,38 +444,44 @@ SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause]; SELECT TO_JSON(str_literal) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: Convert a JSON string to a JSON body. +**Description**: Converts a string into JSON. + +**Return value type**: JSON -**Return value type**: JSON. +**Applicable data types**: JSON strings in the form `{"literal": literal}`. `{}` indicates a null value. The key must be a string literal. The value can be a numeric literal, string literal, Boolean literal, or null literal. str_literal cannot include escape characters. -**Applicable data types**: JSON string, in the format like '{ "literal" : literal }'. '{}' is NULL value. keys in the string must be string constants, values can be constants of numeric types, bool, string or NULL. Escaping characters are not allowed in the JSON string. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable table types**: table, STable. +**Applicable table types**: table, STable -**Applicable nested query**: Inner query and Outer query. #### TO_UNIXTIMESTAMP ```sql -SELECT TO_UNIXTIMESTAMP(datetime_string | ts_col) FROM { tb_name | stb_name } [WHERE clause]; +SELECT TO_UNIXTIMESTAMP(datetime_string) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: UNIX timestamp converted from a string of date/time format. +**Description**: UNIX timestamp converted from a string of date/time format + +**Return value type**: BIGINT -**Return value type**: BIGINT. +**Applicable column types**: VARCHAR and NCHAR -**Applicable data types**: VARCHAR, NCHAR. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables **More explanations**: -- The input string must be compatible with ISO8601/RFC3339 standard, NULL will be returned if the string cannot be converted. -- The precision of the returned timestamp is same as the precision set for the current database in use. +- The input string must be compatible with ISO8601/RFC3339 standard, NULL will be returned if the string can't be converted +- The precision of the returned timestamp is same as the precision set for the current data base in use + -### DateTime Functions +### Time and Date Functions -DateTime functions applied to timestamp data. NOW(), TODAY() and TIMEZONE() are executed only once even though they may occur multiple times in a single SQL statement. +These functions perform operations on times and dates. + +All functions that return the current time, such as `NOW`, `TODAY`, and `TIMEZONE`, are calculated only once per statement even if they appear multiple times. #### NOW @@ -494,61 +491,66 @@ SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior NOW() INSERT INTO tb_name VALUES (NOW(), ...); ``` -**Description**: The current time of the client side system. +**Description**: The current time of the client side system -**Return value type**: TIMESTAMP. +**Return value type**: TIMESTAMP -**Applicable data types**: TIMESTAMP only if used in WHERE/INSERT clause. +**Applicable column types**: TIMESTAMP only -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables + +**Nested query**: It can be used in both the outer query and inner query in a nested query. **More explanations**: -- Addition and Subtraction operation with time duration can be performed, for example NOW() + 1s, the time unit can be one of the followings: - b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), w(week). -- The precision of the returned timestamp is same as the precision set for the current database in use. +- Add and Subtract operation can be performed, for example NOW() + 1s, the time unit can be: + b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), w(week) +- The precision of the returned timestamp is same as the precision set for the current data base in use + #### TIMEDIFF ```sql -SELECT TIMEDIFF(ts1 | datetime_string1, ts2 | datetime_string2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause]; +SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The difference(duration) between two timestamps, and rounded to the time unit specified by `time_unit`. +**Description**: The difference between two timestamps, and rounded to the time unit specified by `time_unit` -**Return value type**: BIGINT. +**Return value type**: BIGINT -**Applicable data types**: INTEGER/TIMESTAMP represents UNIX timestamp, or VARCHAR/NCHAR string in date/time format. +**Applicable column types**: UNIX-style timestamps in BIGINT and TIMESTAMP format and other timestamps in VARCHAR and NCHAR format -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: +**Nested query**: It can be used in both the outer query and inner query in a nested query. +**More explanations**: - Time unit specified by `time_unit` can be: - 1b(nanosecond), 1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day),1w(week). -- If `time_unit` parameter is not specified, the precision of the returned time duration is same as the precision set for the current database in use. -- If input date-time string cannot be converted to UNIX timestamp, NULL value is returned. + 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) +- The precision of the returned timestamp is same as the precision set for the current data base in use +- If the input data is not formatted as a timestamp, the returned value is null. + #### TIMETRUNCATE ```sql -SELECT TIMETRUNCATE(ts_val | datetime_string | ts_col, time_unit) FROM { tb_name | stb_name } [WHERE clause]; +SELECT TIMETRUNCATE(ts | datetime_string , time_unit) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: Truncate the input timestamp with unit specified by `time_unit`. +**Description**: Truncate the input timestamp with unit specified by `time_unit` -**Return value type**: TIMESTAMP. +**Return value type**: TIMESTAMP -**Applicable data types**: INTEGER/TIMESTAMP represents UNIX timestamp, or VARCHAR/NCHAR string in date/time format. +**Applicable column types**: UNIX-style timestamps in BIGINT and TIMESTAMP format and other timestamps in VARCHAR and NCHAR format -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables **More explanations**: - - Time unit specified by `time_unit` can be: - 1b(nanosecond),1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day),1w(week). -- The precision of the returned timestamp is same as the precision set for the current database in use. -- If input date-time string cannot be converted to UNIX timestamp, NULL value is returned. + 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) +- The precision of the returned timestamp is same as the precision set for the current data base in use +- If the input data is not formatted as a timestamp, the returned value is null. + #### TIMEZONE @@ -556,13 +558,14 @@ SELECT TIMETRUNCATE(ts_val | datetime_string | ts_col, time_unit) FROM { tb_name SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The timezone of the client side system. +**Description**: The timezone of the client side system + +**Applicable data types**: VARCHAR -**Return value type**: VARCHAR. +**Applicable column types**: None -**Applicable data types**: None. +**Applicable table types**: standard tables and supertables -**Applicable table types**: table, STable. #### TODAY @@ -572,269 +575,269 @@ SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior TODAY INSERT INTO tb_name VALUES (TODAY(), ...); ``` -**Description**: The timestamp of 00:00:00 of the client side system. +**Description**: The timestamp of 00:00:00 of the client side system -**Return value type**: TIMESTAMP. +**Return value type**: TIMESTAMP -**Applicable data types**: TIMESTAMP only if used in WHERE/INSERT clause. +**Applicable column types**: TIMESTAMP only -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables **More explanations**: -- Addition and Subtraction operation can be performed with time durations, for example NOW() + 1s, the time unit can be: - b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), w(week). -- The precision of the returned timestamp is same as the precision set for the current database in use. +- Add and Subtract operation can be performed, for example TODAY() + 1s, the time unit can be: + b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), w(week) +- The precision of the returned timestamp is same as the precision set for the current data base in use + ## Aggregate Functions -Aggregate functions return single result row for each group in the query result set. Groups are determined by `GROUP BY` clause or time window clause if they are used; or the whole result is considered a group if neither of them is used. +Aggregate functions return one row per group. You can use windows or GROUP BY to group data. Otherwise, the entire query is considered a single group. + +TDengine supports the following aggregate functions: ### APERCENTILE -``` -SELECT APERCENTILE(field_name, P[, algo_type]) -FROM { tb_name | stb_name } [WHERE clause] +```sql +SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: Similar to `PERCENTILE`, but a approximated result is returned. +**Description**: Similar to `PERCENTILE`, but a simulated result is returned -**Return value type**: DOUBLE. +**Return value type**: DOUBLE -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. - -**More explanations** +**Applicable table types**: standard tables and supertables +**Explanations**: - _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX. -- **algo_type** can only be input as `default` or `t-digest`, if it's not specified `default` will be used, i.e. `apercentile(column_name, 50)` is same as `apercentile(column_name, 50, "default")`. -- If `default` is used, histogram based algorithm is used for calculation. If `t-digest` is used, `t-digest` sampling algorithm is used to calculate the result. - -**Nested query**: It can be used in both the outer query and inner query in a nested query. +- `algo_type` can only be input as `default` or `t-digest` Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default. ### AVG -``` +```sql SELECT AVG(field_name) FROM tb_name [WHERE clause]; ``` -**Description**: Get the average value of a column in a table or STable. +**Description**: The average value of the specified fields. + +**Return value type**: DOUBLE -**Return value type**: DOUBLE. +**Applicable data types**: Numeric -**Applicable data types**: Numeric type. +**Applicable table types**: standard tables and supertables -**Applicable table types**: table, STable. ### COUNT -``` +```sql SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause]; ``` -**Description**: Get the number of rows in a table or a super table. +**Description**: The number of records in the specified fields. -**Return value type**: BIGINT. +**Return value type**: BIGINT -**Applicable data types**: All data types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables **More explanation**: -- Wildcard (\*) is used to represent all columns. If \* used `COUNT` function will get the total number of all rows. -- The number of non-NULL values will be returned if this function is used on a specific column. +- Wildcard (\*) is used to represent all columns. +If you input a specific column, the number of non-null values in the column is returned. + ### ELAPSED -```mysql -SELECT ELAPSED(field_name[, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]]; +```sql +SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]]; ``` -**Description**:`elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calcualted time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time duration within the specified time range. Please be noted that the calculated time duration is in the specified `time_unit`. +**Description**:`elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calcualted time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length. -**Return value type**:DOUBLE. +**Return value type**: Double if the input value is not NULL; -**Applicable data type**:TIMESTAMP. +**Return value type**: TIMESTAMP -**Applicable tables**: table, STable, outter in nested query. +**Applicable tables**: table, STable, outter in nested query **Explanations**: - - `field_name` parameter can only be the first column of a table, i.e. timestamp primary key. -- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default ime unit. +- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit. Time unit specified by `time_unit` can be: + 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) - It can be used with `INTERVAL` to get the time valid time length of each time window. Please be noted that the return value is same as the time window for all time windows except for the first and the last time window. - `order by asc/desc` has no effect on the result. - `group by tbname` must be used together when `elapsed` is used against a STable. - `group by` must NOT be used together when `elapsed` is used against a table or sub table. -- When used in nested query, it's only applicable when the inner query outputs an implicit timestamp column as the primary key. For example, `select elapsed(ts) from (select diff(value) from sub1)` is legal usage while `select elapsed(ts) from (select * from sub1)` is not. -- It cannot be used with `leastsquares`, `diff`, `derivative`, `top`, `bottom`, `last_row`, `interp`. +- When used in nested query, it's only applicable when the inner query outputs an implicit timestamp column as the primary key. For example, `select elapsed(ts) from (select diff(value) from sub1)` is legal usage while `select elapsed(ts) from (select * from sub1)` is not. In addition, because elapsed has a strict dependency on the timeline, a statement like `select elapsed(ts) from (select diff(value) from st group by tbname) will return a meaningless result. +- It can't be used with `leastsquares`, `diff`, `derivative`, `top`, `bottom`, `last_row`, `interp`. + ### LEASTSQUARES -``` +```sql SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]; ``` **Description**: The linear regression function of the specified column and the timestamp column (primary key), `start_val` is the initial value and `step_val` is the step value. -**Return value type**: VARCHAR string in the format of "(slope, intercept)". +**Return value type**: A string in the format of "(slope, intercept)" -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric + +**Applicable table types**: table only -**Applicable table types**: table only. ### SPREAD -``` +```sql SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The difference between the max and the min value of a specific column. +**Description**: The difference between the max and the min of a specific column -**Return value type**: DOUBLE. +**Return value type**: DOUBLE -**Applicable data types**: Numeric types. +**Applicable data types**: Integers and timestamps -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: Can be used on a column of TIMESTAMP type, the result time unit precision is same as the current database in use. ### STDDEV -``` +```sql SELECT STDDEV(field_name) FROM tb_name [WHERE clause]; ``` -**Description**: Standard deviation of a specific column in a table or STable. +**Description**: Standard deviation of a specific column in a table or STable + +**Return value type**: DOUBLE -**Return value type**: DOUBLE. +**Applicable data types**: Numeric -**Applicable data types**: Numeric types. +**Applicable table types**: standard tables and supertables -**Applicable table types**: table, STable. ### SUM -``` +```sql SELECT SUM(field_name) FROM tb_name [WHERE clause]; ``` -**Description**: The summation of values of a specific column in a table or STable. +**Description**: The sum of a specific column in a table or STable -**Return value type**: DOUBLE. +**Return value type**: DOUBLE or BIGINT -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric + +**Applicable table types**: standard tables and supertables -**Applicable table types**: table, STable. ### HYPERLOGLOG -``` +```sql SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**:The cardinal number of a specific column is returned by using hyperloglog algorithm. +**Description**: + The cardinal number of a specific column is returned by using hyperloglog algorithm. The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge. + However, when the data volume is very small, the result may be not accurate, it's recommented to use `select count(data) from (select unique(col) as data from table)` in this case. + +**Return value type**: Integer -**Return value type**: INTEGER. +**Applicable data types**: Numeric -**Applicable data types**: All data types. +**Applicable table types**: standard tables and supertables -**More explanations**: The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge. However, when the data volume is very small, the result may be not accurate, it's recommented to use `select count(data) from (select unique(col) as data from table)` in this case. ### HISTOGRAM -``` +```sql SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_name [WHERE clause]; ``` **Description**:Returns count of data points in user-specified ranges. -**Return value type**:DOUBLE or BIGINT, depends on normalized parameter settings. +**Return value type** If normalized is set to 1, a DOUBLE is returned; otherwise a BIGINT is returned -**Applicable data type**:Numerical types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: table, STable **Explanations**: - - bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"。 -- bin_description: parameter to describe the rule to generate buckets,can be in the following JSON formats for each bin_type respectively: - - - "user_input": "[1, 3, 5, 7]": User specified bin values. - - - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" - "start" - bin starting point. - "width" - bin offset. - "count" - number of bins generated. - "infinity" - whether to add(-inf, inf)as start/end point in generated set of bins. - The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]. - - - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" - "start" - bin starting point. - "factor" - exponential factor of bin offset. - "count" - number of bins generated. - "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins. - The above "log_bin" descriptor generates a set of bins:[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]. +- bin_description: parameter to describe how to generate buckets,can be in the following JSON formats for each bin_type respectively: + - "user_input": "[1, 3, 5, 7]": + User specified bin values. + + - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" + "start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated set of bins. + The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]. + + - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" + "start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins. + The above "linear_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]. +- normalized: setting to 1/0 to turn on/off result normalization. Valid values are 0 or 1. -- normalized: setting to 1/0 to turn on/off result normalization. ### PERCENTILE -``` +```sql SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause]; ``` **Description**: The value whose rank in a specific column matches the specified percentage. If such a value matching the specified percentage doesn't exist in the column, an interpolation value will be returned. -**Return value type**: DOUBLE. +**Return value type**: DOUBLE -**Applicable data types**: Numeric types. +**Applicable column types**: Numeric -**Applicable table types**: table. +**Applicable table types**: table only **More explanations**: _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX. -## Selector Functions -Selector functiosn choose one or more rows in the query result according to the semantics. You can specify to output primary timestamp column and other columns including tbname and tags so that you can easily know which rows the selected values belong to. +## Selection Functions + +Selection functions return one or more results depending. You can specify the timestamp column, tbname pseudocolumn, or tag columns to show which rows contain the selected value. ### BOTTOM -``` +```sql SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; ``` **Description**: The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly. -**Return value type**: Same as the column being operated upon. +**Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: +**More explanation**: -- _k_ must be in range [1,100]. -- The timestamp associated with the selected values are returned too. -- Can't be used with `FILL`. +- _k_ must be in range [1,100] +- The timestamp associated with the selected values are returned too +- Can't be used with `FILL` ### FIRST -``` +```sql SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The first non-null value of a specific column in a table or STable. +**Description**: The first non-null value of a specific column in a table or STable -**Return value type**: Same as the column being operated upon. +**Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: All data types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: +**More explanation**: - FIRST(\*) can be used to get the first non-null value of all columns - NULL will be returned if all the values of the specified column are all NULL @@ -842,225 +845,231 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]; ### INTERP -``` -SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]; +```sql +SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] RANGE(timestamp1,timestamp2) EVERY(interval) FILL({ VALUE | PREV | NULL | LINEAR | NEXT}); ``` **Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned. -**Return value type**: Same as the column being operated upon. +**Return value type**: Same as the column being operated upon -**Applicable data types**: Numeric data types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable, nested query. +**Applicable table types**: standard tables and supertables **More explanations** - `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter. - The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input. -- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. If `RANGE` is not specified, then the timestamp of the first row that matches the filter condition is treated as timestamp1, the timestamp of the last row that matches the filter condition is treated as timestamp2. -- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter. If `EVERY` parameter is not used, the time windows will be considered as no ending timestamp, i.e. there is only one time window from timestamp1. -- Interpolation is performed based on `FILL` parameter. No interpolation is performed if `FILL` is not used, that means either the original data that matches is returned or nothing is returned. -- `INTERP` can only be used to interpolate in single timeline. So it must be used with `group by tbname` when it's used on a STable. It can't be used with `GROUP BY` when it's used in the inner query of a nested query. -- The result of `INTERP` is not influenced by `ORDER BY TIMESTAMP`, which impacts the output order only.. +- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. +- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter. +- Interpolation is performed based on `FILL` parameter. +- `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable. ### LAST -``` +```sql SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The last non-NULL value of a specific column in a table or STable. +**Description**: The last non-NULL value of a specific column in a table or STable -**Return value type**: Same as the column being operated upon. +**Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: All data types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: +**More explanation**: - LAST(\*) can be used to get the last non-NULL value of all columns - If the values of a column in the result set are all NULL, NULL is returned for that column; if all columns in the result are all NULL, no result will be returned. - When it's used on a STable, if there are multiple values with the timestamp in the result set, one of them will be returned randomly and it's not guaranteed that the same value is returned if the same query is run multiple times. + ### LAST_ROW -``` +```sql SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }; ``` -**Description**: The last row of a table or STable. +**Description**: The last row of a table or STable -**Return value type**: Same as the column being operated upon. +**Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: All data type. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables **More explanations**: -- When it's used against a STable, multiple rows with the same and largest timestamp may exist, in this case one of them is returned randomly and it's not guaranteed that the result is same if the query is run multiple times. -- Cannot be used with `INTERVAL`. +- When it's used on a STable, if there are multiple values with the timestamp in the result set, one of them will be returned randomly and it's not guaranteed that the same value is returned if the same query is run multiple times. +- Can't be used with `INTERVAL`. ### MAX -``` +```sql SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The maximum value of a specific column of a table or STable. +**Description**: The maximum value of a specific column of a table or STable -**Return value type**: Same as the data type of the column being operated upon. +**Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric + +**Applicable table types**: standard tables and supertables -**Applicable table types**: table, STable. ### MIN -``` +```sql SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]; ``` -**Description**: The minimum value of a specific column in a table or STable. +**Description**: The minimum value of a specific column in a table or STable -**Return value type**: Same as the data type of the column being operated upon. +**Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric + +**Applicable table types**: standard tables and supertables -**Applicable table types**: table, STable. ### MODE -``` +```sql SELECT MODE(field_name) FROM tb_name [WHERE clause]; ``` **Description**:The value which has the highest frequency of occurrence. NULL is returned if there are multiple values which have highest frequency of occurrence. -**Return value type**:Same as the data type of the column being operated upon. +**Return value type**: Same as the input data + +**Applicable data types**: Numeric -**Applicable data types**: All data types. +**Applicable table types**: standard tables and supertables -**More explanations**:Considering the number of returned result set is unpredictable, it's suggested to limit the number of unique values to 100,000, otherwise error will be returned. ### SAMPLE ```sql - SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause] +SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause] ``` **Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,1000]. -**Return value type**: Same as the column being operated. +**Return value type**: Same as the column being operated plus the associated timestamp -**Applicable data types**: All data types. +**Applicable data types**: Any data type except for tags of STable -**Applicable table types**: table, STable. +**Applicable nested query**: Inner query and Outer query -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables -**More explanations**: +**More explanations**: + +This function cannot be used in expression calculation. +- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline -- Arithmetic operation cannot be operated on the result of `SAMPLE` function -- Must be used with `Partition by tbname` when it's used on a STable to force the result on each single timeline. ### TAIL -``` +```sql SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause]; ``` **Description**: The next _k_ rows are returned after skipping the last `offset_val` rows, NULL values are not ignored. `offset_val` is optional parameter. When it's not specified, the last _k_ rows are returned. When `offset_val` is used, the effect is same as `order by ts desc LIMIT k OFFSET offset_val`. -**Parameter value range**: k: [1,100] offset_val: [0,100]. +**Parameter value range**: k: [1,100] offset_val: [0,100] + +**Return value type**:Same as the data type of the column being operated upon + +**Applicable data types**: Any data type except for timestamp, i.e. the primary key -**Return value type**: Same as the column being operated upon. +**Applicable table types**: standard tables and supertables -**Applicable data types**: All data types. ### TOP -``` +```sql SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; ``` **Description**: The greatest _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly. -**Return value type**: Same as the column being operated upon. +**Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: +**More explanation**: -- _k_ must be in range [1,100]. -- The timestamp associated with the selected values are returned too. -- Cannot be used with `FILL`. +- _k_ must be in range [1,100] +- The timestamp associated with the selected values are returned too +- Can't be used with `FILL` ### UNIQUE -``` +```sql SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause]; ``` -**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp. +**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp. The first occurrence of a timestamp or tag is used. -**Return value type**: Same as the column or tag being operated upon. +**Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: All data types. +**Applicable column types**: Any data types except for timestamp -**More explanations**: +**Applicable table types**: table, STable -- It can be used against table or STable, but can't be used together with time window, like `interval`, `state_window` or `session_window` . -- Considering the number of result sets is unpredictable, it's suggested to limit the distinct values under 100,000 to control the memory usage, otherwise error will be returned. -## Time-Series Specific Functions +## Time-Series Extensions -TDengine provides a set of time-series specific functions to better meet the requirements in querying time-series data. In general databases, similar functionalities can only be achieved with much more complex syntax and much worse performance. TDengine provides these functionalities in builtin functions so that the burden on user side is minimized. +TDengine includes extensions to standard SQL that are intended specifically for time-series use cases. The functions enabled by these extensions require complex queries to implement in general-purpose databases. By offering them as built-in extensions, TDengine reduces user workload. ### CSUM ```sql - SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause] +SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` **Description**: The cumulative sum of each row for a specific column. The number of output rows is same as that of the input rows. -**Return value type**: BIGINT for signed integer input types; UNSIGNED BIGINT for unsigned integer input types; DOUBLE for floating point input types. +**Return value type**: Long integer for integers; Double for floating points. uint64_t for unsigned integers -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable nested query**: Inner query and Outer query. +**Applicable table types**: standard tables and supertables + +**More explanations**: + +- Arithmetic operation can't be performed on the result of `csum` function +- Can only be used with aggregate functions This function can be used with supertables and standard tables. +- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline -**More explanations**: -- Arithmetic operation cannot be performed on the result of `csum` function. -- Can only be used with aggregate functions. -- `Partition by tbname` must be used together on a STable to force the result on a single timeline. ### DERIVATIVE -``` +```sql SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause]; ``` **Description**: The derivative of a specific column. The time rage can be specified by parameter `time_interval`, the minimum allowed time range is 1 second (1s); the value of `ignore_negative` can be 0 or 1, 1 means negative values are ignored. -**Return value type**: DOUBLE. +**Return value type**: DOUBLE -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: - -- The number of result rows is the number of total rows in the time range subtracted by one, no output for the first row. +**More explanation**: + - It can be used together with `PARTITION BY tbname` against a STable. -- Can be used together with selection of relative columns. E.g. select \_rowts, DERIVATIVE() from. +- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from。 ### DIFF @@ -1070,159 +1079,159 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER **Description**: The different of each row with its previous row for a specific column. `ignore_negative` can be specified as 0 or 1, the default value is 1 if it's not specified. `1` means negative values are ignored. -**Return value type**: Same as the column being operated upon. +**Return value type**:Same as the data type of the column being operated upon -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: +**More explanation**: + +- The number of result rows is the number of rows subtracted by one, no output for the first row +- It can be used together with a selected column. For example: select \_rowts, DIFF() from。 -- The number of result rows is the number of rows subtracted by one, no output for the first row. -- It can be used on STable with `PARTITION by tbname`. -- Can be used together with selection of relative columns. E.g. select \_rowts, DIFF() from. ### IRATE -``` +```sql SELECT IRATE(field_name) FROM tb_name WHERE clause; ``` **Description**: instantaneous rate on a specific column. The last two samples in the specified time range are used to calculate instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values. -**Return value type**: DOUBLE. +**Return value type**: DOUBLE -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: - -- It can be used on stble with `PARTITION BY`, i.e. timelines generated by `PARTITION BY tbname` on a STable. ### MAVG ```sql - SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause] +SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause] ``` **Description**: The moving average of continuous _k_ values of a specific column. If the number of input rows is less than _k_, nothing is returned. The applicable range of _k_ is [1,1000]. -**Return value type**: DOUBLE. +**Return value type**: DOUBLE -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable nested query**: Inner query and Outer query. +**Nested query**: It can be used in both the outer query and inner query in a nested query. -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: +**More explanations**: + +- Arithmetic operation can't be performed on the result of `MAVG`. +- Can only be used with data columns, can't be used with tags. - Can't be used with aggregate functions. +- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline -- Arithmetic operation cannot be performed on the result of `MAVG`. -- Cannot be used with aggregate functions. -- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline. ### STATECOUNT -``` +```sql SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The number of continuous rows satisfying the specified conditions for a specific column. If the specified condition is evaluated as true, the number is increased by 1; otherwise the number is reset to -1. If the input value is NULL, then the corresponding row is skipped. +**Description**: The number of continuous rows satisfying the specified conditions for a specific column. The result is shown as an extra column for each row. If the specified condition is evaluated as true, the number is increased by 1; otherwise the number is reset to -1. If the input value is NULL, then the corresponding row is skipped. **Applicable parameter values**: -- oper : Can be one of "LT" (lower than), "GT" (greater than), "LE" (lower than or euqal to), "GE" (greater than or equal to), "NE" (not equal to), "EQ" (equal to). -- val : Numeric types. +- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes. +- val : Numeric types -**Return value type**: INTEGER. +**Return value type**: Integer -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable nested query**: Outer query only -**Applicable nested query**: Outer query only. +**Applicable table types**: standard tables and supertables **More explanations**: -- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline. -- Cannot be used with window operation, like interval/state_window/session_window. +- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline] +- Can't be used with window operation, like interval/state_window/session_window + ### STATEDURATION -``` +```sql SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The length of time range in which all rows satisfy the specified condition for a specific column. The length for the first row that satisfies the condition is 0. Next, if the condition is evaluated as true for a row, the time interval between current row and its previous row is added up to the time range; otherwise the time range length is reset to -1. If the value of the column is NULL, the corresponding row is skipped. +**Description**: The length of time range in which all rows satisfy the specified condition for a specific column. The result is shown as an extra column for each row. The length for the first row that satisfies the condition is 0. Next, if the condition is evaluated as true for a row, the time interval between current row and its previous row is added up to the time range; otherwise the time range length is reset to -1. If the value of the column is NULL, the corresponding row is skipped. **Applicable parameter values**: -- oper : Can be one of "LT" (lower than), "GT" (greater than), "LE" (lower than or euqal to), "GE" (greater than or equal to), "NE" (not equal to), "EQ" (equal to). -- val : Numeric types. -- unit : The unit of time interval, can be: 1b(nanosecond), 1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day),1w(week). If not specified, default is same as the current database time precision in use. +- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes. +- val : Numeric types +- unit: The unit of time interval. Enter one of the following options: 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) If you do not enter a unit of time, the precision of the current database is used by default. -**Return value type**: INTEGER. +**Return value type**: Integer -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable nested query**: Outer query only -**Applicable nested query**: Outer query only. +**Applicable table types**: standard tables and supertables **More explanations**: -- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline. -- Cannot be used with window operation, like interval/state_window/session_window. +- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline] +- Can't be used with window operation, like interval/state_window/session_window + ### TWA -``` +```sql SELECT TWA(field_name) FROM tb_name WHERE clause; ``` -**Description**: Time weighted average on a specific column within a time range. +**Description**: Time weighted average on a specific column within a time range -**Return value type**: DOUBLE. +**Return value type**: DOUBLE -**Applicable data types**: Numeric types. +**Applicable data types**: Numeric -**Applicable table types**: table, STable. +**Applicable table types**: standard tables and supertables -**More explanations**: +- Must be used together with `PARTITION BY tbname` to force the result into each single timeline. -- It can be used on stable with `PARTITION BY`, i.e. timelines generated by `PARTITION BY tbname` on a STable. ## System Information Functions ### DATABASE -``` +```sql SELECT DATABASE(); ``` -**Description**:Return the current database being used. If the user doesn't specify database when logon and doesn't use `USE` SQL command to switch the datbase, this function returns NULL. +**Description**: The current database. If no database is specified upon logging in and no database is specified with `USE` after login, NULL will be returned by `select database()`. + ### CLIENT_VERSION -``` +```sql SELECT CLIENT_VERSION(); ``` -**Description**:Return the client version. +**Description**: The client version. ### SERVER_VERSION -``` +```sql SELECT SERVER_VERSION(); ``` -**Description**:Returns the server version. +**Description**: The server version. ### SERVER_STATUS -``` -SELECT SERVER_VERSION(); +```sql +SELECT SERVER_STATUS(); ``` -**Description**:Returns the server's status. +**Description**: The server status. diff --git a/docs/en/12-taos-sql/12-distinguished.md b/docs/en/12-taos-sql/12-distinguished.md index d2f7cf66b63521d157a6e05f1dd8d93658d65549..707089abe54fc12bb09de47c1c51af1a32b8cbcd 100644 --- a/docs/en/12-taos-sql/12-distinguished.md +++ b/docs/en/12-taos-sql/12-distinguished.md @@ -1,60 +1,35 @@ --- -sidebar_label: Distinguished -title: Distinguished Query for Time Series Database +sidebar_label: Time-Series Extensions +title: Time-Series Extensions --- -Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every seconds, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. -Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window. +As a purpose-built database for storing and processing time-series data, TDengine provides time-series-specific extensions to standard SQL. -## Time Window +These extensions include tag-partitioned queries and windowed queries. -The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window. - -![TDengine Database Time Window](./timewindow-1.webp) +## Tag-Partitioned Queries -`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. The SQL statement below is illegal because no aggregate or selection function is used with `INTERVAL`. +When you query a supertable, you may need to partition the supertable by tag and perform additional operations on a specific partition. In this case, you can use the following SQL clause: +```sql +PARTITION BY part_list ``` -SELECT * FROM temp_tb_1 INTERVAL(1m); -``` - -The time step specified by `SLIDING` cannot exceed the time interval specified by `INTERVAL`. The SQL statement below is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`. - -``` -SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m); -``` - -When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip/tumbling window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. Since version 2.1.5.0, the minimum time range by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side. -## Status Window +part_list can be any scalar expression, such as a column, constant, scalar function, or a combination of the preceding items. -In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now. - -![TDengine Database Status Window](./timewindow-3.webp) - -`STATE_WINDOW` is used to specify the column on which the status window will be based. For example: - -``` -SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status); -``` +A PARTITION BY clause with a tag is processed as follows: -## Session Window +- The PARTITION BY clause must occur after the WHERE clause and cannot be used with a JOIN clause. +- The PARTITION BY clause partitions the super table by the specified tag group, and the specified calculation is performed on each partition. The calculation performed is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause. +- You can use PARTITION BY together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value: ```sql -SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val); +select max(current) from meters partition by location interval(10m) ``` -The primary key, i.e. timestamp, is used to determine which session window a row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different session windows. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds. - -![TDengine Database Session Window](./timewindow-2.webp) - -If the time interval between two continuous rows are within the time interval specified by `tol_value` they belong to the same session window; otherwise a new session window is started automatically. Session window is not supported on STable for now. - -## More On Window Aggregate +## Windowed Queries -### Syntax - -The full syntax of aggregate by window is as follows: +Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every seconds, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window. The query syntax is as follows: ```sql SELECT function_list FROM tb_name @@ -63,27 +38,45 @@ SELECT function_list FROM tb_name [STATE_WINDOW(col)] [INTERVAL(interval [, offset]) [SLIDING sliding]] [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})] - -SELECT function_list FROM stb_name - [WHERE where_condition] - [INTERVAL(interval [, offset]) [SLIDING sliding]] - [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})] - [GROUP BY tags] ``` -### Restrictions +The following restrictions apply: + +### Restricted Functions - Aggregate functions and select functions can be used in `function_list`, with each function having only one output. For example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple outputs, such as DIFF or arithmetic operations can't be used. - `LAST_ROW` can't be used together with window aggregate. - Scalar functions, like CEIL/FLOOR, can't be used with window aggregate. + +### Other Rules + +- The window clause must occur after the PARTITION BY clause and before the GROUP BY clause. It cannot be used with a GROUP BY clause. +- SELECT clauses on windows can contain only the following expressions: + - Constants + - Aggregate functions + - Expressions that include the preceding expressions. +- The window clause cannot be used with a GROUP BY clause. - `WHERE` clause can be used to specify the starting and ending time and other filter conditions -- `FILL` clause is used to specify how to fill when there is data missing in any window, including: - 1. NONE: No fill (the default fill mode) - 2. VALUE:Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` - 3. PREV:Fill with the previous non-NULL value, `FILL(PREV)` - 4. NULL:Fill with NULL, `FILL(NULL)` - 5. LINEAR:Fill with the closest non-NULL value, `FILL(LINEAR)` - 6. NEXT:Fill with the next non-NULL value, `FILL(NEXT)` + + +### Window Pseudocolumns + +**\_WSTART, \_WEND, and \_WDURATION** + +The \_WSTART, \_WEND, and \_WDURATION pseudocolumns indicate the beginning, end, and duration of a window. + +These pseudocolumns occur after the aggregation clause. + +### FILL Clause + +`FILL` clause is used to specify how to fill when there is data missing in any window, including: + +1. NONE: No fill (the default fill mode) +2. VALUE:Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled. +3. PREV:Fill with the previous non-NULL value, `FILL(PREV)` +4. NULL:Fill with NULL, `FILL(NULL)` +5. LINEAR:Fill with the closest non-NULL value, `FILL(LINEAR)` +6. NEXT:Fill with the next non-NULL value, `FILL(NEXT)` :::info @@ -93,17 +86,66 @@ SELECT function_list FROM stb_name ::: -Aggregate by time window is also used in continuous query, please refer to [Continuous Query](/develop/continuous-query). +### Time Window + +There are two kinds of time windows: sliding window and flip time/tumbling window. + +The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window. + +![TDengine Database Time Window](./timewindow-1.webp) + +`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. The SQL statement below is illegal because no aggregate or selection function is used with `INTERVAL`. + +``` +SELECT * FROM temp_tb_1 INTERVAL(1m); +``` + +The time step specified by `SLIDING` cannot exceed the time interval specified by `INTERVAL`. The SQL statement below is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`. -## Examples +``` +SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m); +``` + +When using time windows, note the following: + +- The window length for aggregation depends on the value of INTERVAL. The minimum interval is 10 ms. You can configure a window as an offset from UTC 0:00. The offset cannot be smaler than the interval. You can use SLIDING to specify the length of time that the window moves forward. +Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side. +- The result set is in ascending order of timestamp when you aggregate by time window. + +### Status Window + +In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now. + +![TDengine Database Status Window](./timewindow-3.webp) + +`STATE_WINDOW` is used to specify the column on which the status window will be based. For example: + +``` +SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status); +``` + +### Session Window + +The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30] because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds. + +![TDengine Database Session Window](./timewindow-2.webp) + +If the time interval between two continuous rows are within the time interval specified by `tol_value` they belong to the same session window; otherwise a new session window is started automatically. Session window is not supported on STable for now. + +``` + +SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val); +``` + +### Examples A table of intelligent meters can be created by the SQL statement below: -```sql +``` CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT); ``` -The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the SQL statement below, with missing values filled with the previous non-NULL values. +The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the SQL statement below, with missing values filled with the previous non-NULL values. The query statement is as follows: ``` SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters diff --git a/docs/en/12-taos-sql/13-tmq.md b/docs/en/12-taos-sql/13-tmq.md index 4d9c475a3829456916175d8a0518c47d67bc18ee..befab4f4f01e595564e93ffcfbb0723e13294af0 100644 --- a/docs/en/12-taos-sql/13-tmq.md +++ b/docs/en/12-taos-sql/13-tmq.md @@ -1,41 +1,34 @@ --- -sidebar_label: 消息队列 -title: 消息队列 +sidebar_label: Data Subscription +title: Data Subscription --- -TDengine 3.0.0.0 开始对消息队列做了大幅的优化和增强以简化用户的解决方案。 +The information in this document is related to the TDengine data subscription feature. -## 创建订阅主题 +## Create a Topic ```sql -CREATE TOPIC [IF NOT EXISTS] topic_name AS {subquery | DATABASE db_name | STABLE stb_name }; +CREATE TOPIC [IF NOT EXISTS] topic_name AS subquery; ``` -订阅主题包括三种:列订阅、超级表订阅和数据库订阅。 -**列订阅是**用 subquery 描述,支持过滤和标量函数和 UDF 标量函数,不支持 JOIN、GROUP BY、窗口切分子句、聚合函数和 UDF 聚合函数。列订阅规则如下: +You can use filtering, scalar functions, and user-defined scalar functions with a topic. JOIN, GROUP BY, windows, aggregate functions, and user-defined aggregate functions are not supported. The following rules apply to subscribing to a column: -1. TOPIC 一旦创建则返回结果的字段确定 -2. 被订阅或用于计算的列不可被删除、修改 -3. 列可以新增,但新增的列不出现在订阅结果字段中 -4. 对于 select \*,则订阅展开为创建时所有的列(子表、普通表为数据列,超级表为数据列加标签列) +1. The returned field is determined when the topic is created. +2. Columns to which a consumer is subscribed or that are involved in calculations cannot be deleted or modified. +3. If you add a column, the new column will not appear in the results for the subscription. +4. If you run `SELECT \*`, all columns in the subscription at the time of its creation are displayed. This includes columns in supertables, standard tables, and subtables. Supertables are shown as data columns plus tag columns. -**超级表订阅和数据库订阅**规则如下: -1. 被订阅主体的 schema 变更不受限 -2. 返回消息中 schema 是块级别的,每块的 schema 可能不一样 -3. 列变更后写入的数据若未落盘,将以写入时的 schema 返回 -4. 列变更后写入的数据若未已落盘,将以落盘时的 schema 返回 - -## 删除订阅主题 +## Delete a Topic ```sql DROP TOPIC [IF EXISTS] topic_name; ``` -此时如果该订阅主题上存在 consumer,则此 consumer 会收到一个错误。 +If a consumer is subscribed to the topic that you delete, the consumer will receive an error. -## 查看订阅主题 +## View Topics ## SHOW TOPICS @@ -43,24 +36,24 @@ DROP TOPIC [IF EXISTS] topic_name; SHOW TOPICS; ``` -显示当前数据库下的所有主题的信息。 +The preceding command displays all topics in the current database. -## 创建消费组 +## Create Consumer Group -消费组的创建只能通过 TDengine 客户端驱动或者连接器所提供的 API 创建。 +You can create consumer groups only through the TDengine Client driver or the API provided by a connector. -## 删除消费组 +## Delete Consumer Group ```sql DROP CONSUMER GROUP [IF EXISTS] cgroup_name ON topic_name; ``` -删除主题 topic_name 上的消费组 cgroup_name。 +This deletes the cgroup_name in the topic_name. -## 查看消费组 +## View Consumer Groups ```sql SHOW CONSUMERS; ``` -显示当前数据库下所有活跃的消费者的信息。 +The preceding command displays all consumer groups in the current database. diff --git a/docs/en/12-taos-sql/14-stream.md b/docs/en/12-taos-sql/14-stream.md index 7ff7da2bfb82e282cefb1a554283860d0e683de2..fcd78765104af17285b43749969821ceb98da33b 100644 --- a/docs/en/12-taos-sql/14-stream.md +++ b/docs/en/12-taos-sql/14-stream.md @@ -1,13 +1,13 @@ --- -sidebar_label: 流式计算 -title: 流式计算 +sidebar_label: Stream Processing +title: Stream Processing --- -在时序数据的处理中,经常要对原始数据进行清洗、预处理,再使用时序数据库进行长久的储存。用户通常需要在时序数据库之外再搭建 Kafka、Flink、Spark 等流计算处理引擎,增加了用户的开发成本和维护成本。 +Raw time-series data is often cleaned and preprocessed before being permanently stored in a database. Stream processing components like Kafka, Flink, and Spark are often deployed alongside a time-series database to handle these operations, increasing system complexity and maintenance costs. -使用 TDengine 3.0 的流式计算引擎能够最大限度的减少对这些额外中间件的依赖,真正将数据的写入、预处理、长期存储、复杂分析、实时计算、实时报警触发等功能融为一体,并且,所有这些任务只需要使用 SQL 完成,极大降低了用户的学习成本、使用成本。 +Because stream processing is built in to TDengine, you are no longer reliant on middleware. TDengine offers a unified platform for writing, preprocessing, permanent storage, complex analysis, and real-time computation and alerting. Additionally, you can use SQL to perform all these tasks. -## 创建流式计算 +## Create a Stream ```sql CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name AS subquery @@ -18,7 +18,7 @@ stream_options: { ``` -其中 subquery 是 select 普通查询语法的子集: +The subquery is a subset of standard SELECT query syntax: ```sql subquery: SELECT [DISTINCT] select_list @@ -26,97 +26,74 @@ subquery: SELECT [DISTINCT] select_list [WHERE condition] [PARTITION BY tag_list] [window_clause] - [group_by_clause] ``` -不支持 order_by,limit,slimit,fill 语句 +Session windows, state windows, and sliding windows are supported. When you configure a session or state window for a supertable, you must use PARTITION BY TBNAME. -例如,如下语句创建流式计算,同时自动创建名为 avg_vol 的超级表,此流计算以一分钟为时间窗口、30 秒为前向增量统计这些电表的平均电压,并将来自 meters 表的数据的计算结果写入 avg_vol 表,不同 partition 的数据会分别创建子表并写入不同子表。 +```sql +window_clause: { + SESSION(ts_col, tol_val) + | STATE_WINDOW(col) + | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] +} +``` + +`SESSION` indicates a session window, and `tol_val` indicates the maximum range of the time interval. If the time interval between two continuous rows are within the time interval specified by `tol_val` they belong to the same session window; otherwise a new session window is started automatically. + +For example, the following SQL statement creates a stream and automatically creates a supertable named `avg_vol`. The stream has a 1 minute time window that slides forward in 30 second intervals to calculate the average voltage of the meters supertable. ```sql CREATE STREAM avg_vol_s INTO avg_vol AS SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s); ``` -## 删除流式计算 +## Delete a Stream ```sql DROP STREAM [IF NOT EXISTS] stream_name ``` -仅删除流式计算任务,由流式计算写入的数据不会被删除。 +This statement deletes the stream processing service only. The data generated by the stream is retained. -## 展示流式计算 +## View Streams ```sql SHOW STREAMS; ``` -## 流式计算的触发模式 - -在创建流时,可以通过 TRIGGER 指令指定流式计算的触发模式。 - -对于非窗口计算,流式计算的触发是实时的;对于窗口计算,目前提供 3 种触发模式: - -1. AT_ONCE:写入立即触发 - -2. WINDOW_CLOSE:窗口关闭时触发(窗口关闭由事件时间决定,可配合 watermark 使用,详见《流式计算的乱序数据容忍策略》) - -3. MAX_DELAY time:若窗口关闭,则触发计算。若窗口未关闭,且未关闭时长超过 max delay 指定的时间,则触发计算。 - -由于窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,则事件时间无法更新,可能导致无法得到最新的计算结果。 +## Trigger Stream Processing -因此,流式计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。 +When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it. -MAX_DELAY 模式在窗口关闭时会立即触发计算。此外,当数据写入后,计算触发的时间超过 max delay 指定的时间,则立即触发计算 +For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering: -## 流式计算的乱序数据容忍策略 +1. AT_ONCE: triggers on write -在创建流时,可以在 stream_option 中指定 watermark。 +2. WINDOW_CLOSE: triggers when the window closes. This is determined by the event time. You can use WINDOW_CLOSE together with `watermark`. For more information, see Stream Processing Strategy for Out-of-Order Data. -流式计算通过 watermark 来度量对乱序数据的容忍程度,watermark 默认为 0。 +3. MAX_DELAY: triggers when the window closes. If the window has not closed but the time elapsed exceeds MAX_DELAY, stream processing is also triggered. -T = 最新事件时间 - watermark +Because the window closing is determined by the event time, a delay or termination of an event stream will prevent the event time from being updated. This may result in an inability to obtain the latest results. -每批到来的数据都会以上述公式更新窗口关闭时间,并将窗口结束时间 < T 的所有打开的窗口关闭,若触发模式为 WINDOW_CLOSE 或 MAX_DELAY,则推送窗口聚合结果。 +For this reason, MAX_DELAY is provided as a way to ensure that processing occurs even if the window does not close. -流式计算的过期数据处理策略 -对于已关闭的窗口,再次落入该窗口中的数据被标记为过期数据,对于过期数据,流式计算提供两种处理方式: +MAX_DELAY also triggers when the window closes. Additionally, if a write occurs but the processing is not triggered before MAX_DELAY expires, processing is also triggered. -1. 直接丢弃:这是常见流式计算引擎提供的默认(甚至是唯一)计算模式 +## Stream Processing Strategy for Out-of-Order Data -2. 重新计算:从 TSDB 中重新查找对应窗口的所有数据并重新计算得到最新结果 +When you create a stream, you can specify a watermark in the `stream_option` parameter. -无论在哪种模式下,watermark 都应该被妥善设置,来得到正确结果(直接丢弃模式)或避免频繁触发重算带来的性能开销(重新计算模式)。 +The watermark is used to specify the tolerance for out-of-order data. The default value is 0. -## 流式计算的数据填充策略 +T = latest event time - watermark -TODO +The window closing time for each batch of data that arrives at the system is updated using the preceding formula, and all windows are closed whose closing time is less than T. If the triggering method is WINDOW_CLOSE or MAX_DELAY, the aggregate result for the window is pushed. -## 流式计算与会话窗口(session window) +Stream processing strategy for expired data +The data in expired windows is tagged as expired. TDengine stream processing provides two methods for handling such data: -```sql -window_clause: { - SESSION(ts_col, tol_val) - | STATE_WINDOW(col) - | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [FILL(fill_mod_and_val)] -} -``` - -其中,SESSION 是会话窗口,tol_val 是时间间隔的最大范围。在 tol_val 时间间隔范围内的数据都属于同一个窗口,如果连续的两条数据的时间超过 tol_val,则自动开启下一个窗口。 +1. Drop the data. This is the default and often only handling method for most stream processing engines. -## 流式计算的监控与流任务分布查询 +2. Recalculate the data. In this method, all data in the window is reobtained from the database and recalculated. The latest results are then returned. -TODO - -## 流式计算的内存控制与存算分离 - -TODO - -## 流式计算的暂停与恢复 - -```sql -STOP STREAM stream_name; - -RESUME STREAM stream_name; -``` +In both of these methods, configuring the watermark is essential for obtaining accurate results (if expired data is dropped) and avoiding repeated triggers that affect system performance (if expired data is recalculated). diff --git a/docs/en/12-taos-sql/16-operators.md b/docs/en/12-taos-sql/16-operators.md index 0ca9ec49430a66384400bc41cd08562b3d5d28c7..c426e2879342e430c61c4f8133aa9f8186888941 100644 --- a/docs/en/12-taos-sql/16-operators.md +++ b/docs/en/12-taos-sql/16-operators.md @@ -5,62 +5,62 @@ title: Operators ## Arithmetic Operators -| # | **Operator** | **Data Types** | **Description** | -| --- | :----------: | -------------- | --------------------------------------------------------- | -| 1 | +, - | Numeric Types | Representing positive or negative numbers, unary operator | -| 2 | +, - | Numeric Types | Addition and substraction, binary operator | -| 3 | \*, / | Numeric Types | Multiplication and division, binary oeprator | -| 4 | % | Numeric Types | Taking the remainder, binary operator | +| # | **Operator** | **Supported Data Types** | **Description** | +| --- | :--------: | -------------- | -------------------------- | +| 1 | +, - | Numeric | Expresses sign. Unary operators. | +| 2 | +, - | Numeric | Expresses addition and subtraction. Binary operators. | +| 3 | \*, / | Numeric | Expresses multiplication and division. Binary operators. | +| 4 | % | Numeric | Expresses modulo. Binary operator. | ## Bitwise Operators -| # | **Operator** | **Data Types** | **Description** | -| --- | :----------: | -------------- | ----------------------------- | -| 1 | & | Numeric Types | Bitewise AND, binary operator | -| 2 | \| | Numeric Types | Bitewise OR, binary operator | +| # | **Operator** | **Supported Data Types** | **Description** | +| --- | :--------: | -------------- | ------------------ | +| 1 | & | Numeric | Bitwise AND. Binary operator. | +| 2 | \| | Numeric | Bitwise OR. Binary operator. | -## JSON Operator +## JSON Operators -`->` operator can be used to get the value of a key in a column of JSON type, the left oeprand is the column name, the right operand is a string constant. For example, `col->'name'` returns the value of key `'name'`. +The `->` operator returns the value for a key in JSON column. Specify the column indicator on the left of the operator and the key name on the right of the operator. For example, `col->name` returns the value of the name key. -## Set Operator +## Set Operators -Set operators are used to combine the results of two queries into single result. A query including set operators is called a combined query. The number of rows in each result in a combined query must be same, and the type is determined by the first query's result, the type of the following queriess result must be able to be converted to the type of the first query's result, the conversion rule is same as `CAST` function. +Set operators combine the results of two queries. Queries that include set operators are known as compound queries. The expressions corresponding to each query in the select list in a compound query must match in number. The results returned take the data type of the first query, and the data type returned by subsequent queries must be convertible into the data type of the first query. The conditions of the `CAST` function apply to this conversion. -TDengine provides 2 set operators: `UNION ALL` and `UNION`. `UNION ALL` combines the results without removing duplicate data. `UNION` combines the results and remove duplicate data rows. In single SQL statement, at most 100 set operators can be used. +TDengine supports the `UNION` and `UNION ALL` operations. UNION ALL collects all query results and returns them as a composite result without deduplication. UNION collects all query results and returns them as a deduplicated composite result. In a single SQL statement, at most 100 set operators can be supported. -## Comparsion Operator +## Comparison Operators -| # | **Operator** | **Data Types** | **Description** | -| --- | :---------------: | ------------------------------------------------------------------- | ----------------------------------------------- | -| 1 | = | Except for BLOB, MEDIUMBLOB and JSON | Equal | -| 2 | <\>, != | Except for BLOB, MEDIUMBLOB, JSON and primary key of timestamp type | Not equal | -| 3 | \>, < | Except for BLOB, MEDIUMBLOB and JSON | Greater than, less than | -| 4 | \>=, <= | Except for BLOB, MEDIUMBLOB and JSON | Greater than or equal to, less than or equal to | -| 5 | IS [NOT] NULL | Any types | Is NULL or NOT | -| 6 | [NOT] BETWEEN AND | Except for BLOB, MEDIUMBLOB and JSON | In a value range or not | -| 7 | IN | Except for BLOB, MEDIUMBLOB, JSON and primary key of timestamp type | In a list of values or not | -| 8 | LIKE | BINARY, NCHAR and VARCHAR | Wildcard matching | -| 9 | MATCH, NMATCH | BINARY, NCHAR and VARCHAR | Regular expression matching | -| 10 | CONTAINS | JSON | If A key exists in JSON | +| # | **Operator** | **Supported Data Types** | **Description** | +| --- | :---------------: | -------------------------------------------------------------------- | -------------------- | +| 1 | = | All types except BLOB, MEDIUMBLOB, and JSON | Equal to | +| 2 | <\>, != | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Not equal to | +| 3 | \>, < | All types except BLOB, MEDIUMBLOB, and JSON | Greater than and less than | +| 4 | \>=, <= | All types except BLOB, MEDIUMBLOB, and JSON | Greater than or equal to and less than or equal to | +| 5 | IS [NOT] NULL | All types | Indicates whether the value is null | +| 6 | [NOT] BETWEEN AND | All types except BLOB, MEDIUMBLOB, and JSON | Closed interval comparison | +| 7 | IN | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Equal to any value in the list | +| 8 | LIKE | BINARY, NCHAR, and VARCHAR | Wildcard match | +| 9 | MATCH, NMATCH | BINARY, NCHAR, and VARCHAR | Regular expression match | +| 10 | CONTAINS | JSON | Indicates whether the key exists | -`LIKE` operator uses wildcard to match a string, the rules are: +LIKE is used together with wildcards to match strings. Its usage is described as follows: -- '%' matches 0 to any number of characters; '\_' matches any single ASCII character. -- \_ can be used to match a `_` in the string, i.e. using escape character backslash `\` -- Wildcard string is 100 bytes at most. Longer a wildcard string is, worse the performance of LIKE operator is. +- '%' matches 0 or any number of characters, '\_' matches any single ASCII character. +- `\_` is used to match the \_ in the string. +- The maximum length of wildcard string is 100 bytes. A very long wildcard string may slowdown the execution performance of `LIKE` operator. -`MATCH` and `NMATCH` operators use regular expressions to match a string, the rules are: +MATCH and NMATCH are used together with regular expressions to match strings. Their usage is described as follows: -- Regular expressions of POSIX standard are supported. -- Only `tbname`, i.e. table name of sub tables, and tag columns of string types can be matched with regular expression, data columns are not supported. -- Regular expression string is 128 bytes at most, and can be adjusted by setting parameter `maxRegexStringLen`, which is a client side configuration and needs to restart the client to take effect. +- Use POSIX regular expression syntax. For more information, see Regular Expressions. +- Regular expression can be used against only table names, i.e. `tbname`, and tags of binary/nchar types, but can't be used against data columns. +- The maximum length of regular expression string is 128 bytes. Configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression. It's a configuration parameter on the client side, and will take effect after restarting the client. ## Logical Operators -| # | **Operator** | **Data Types** | **Description** | -| --- | :----------: | -------------- | ---------------------------------------------------------------------------------------- | -| 1 | AND | BOOL | Logical AND, return TRUE if both conditions are TRUE; return FALSE if any one is FALSE. | -| 2 | OR | BOOL | Logical OR, return TRUE if any condition is TRUE; return FALSE if both are FALSE | +| # | **Operator** | **Supported Data Types** | **Description** | +| --- | :--------: | -------------- | --------------------------------------------------------------------------- | +| 1 | AND | BOOL | Logical AND; if both conditions are true, TRUE is returned; If either condition is false, FALSE is returned. +| 2 | OR | BOOL | Logical OR; if either condition is true, TRUE is returned; If both conditions are false, FALSE is returned. -TDengine uses shortcircut optimization when performing logical operations. For AND operator, if the first condition is evaluated to FALSE, then the second one is not evaluated. For OR operator, if the first condition is evaluated to TRUE, then the second one is not evaluated. +TDengine performs short-path optimization when calculating logical conditions. If the first condition for AND is false, FALSE is returned without calculating the second condition. If the first condition for OR is true, TRUE is returned without calculating the second condition diff --git a/docs/en/12-taos-sql/17-json.md b/docs/en/12-taos-sql/17-json.md index 7460a5e0ba3ce78ee7744569cda460c477cac19c..77f774303316b466a15226f548f84da69be8f92d 100644 --- a/docs/en/12-taos-sql/17-json.md +++ b/docs/en/12-taos-sql/17-json.md @@ -1,60 +1,64 @@ --- +sidebar_label: JSON Type title: JSON Type --- + ## Syntax 1. Tag of type JSON - ```sql - create STable s1 (ts timestamp, v1 int) tags (info json); + ``` + create stable s1 (ts timestamp, v1 int) tags (info json) - create table s1_1 using s1 tags ('{"k1": "v1"}'); + create table s1_1 using s1 tags ('{"k1": "v1"}') ``` 2. "->" Operator of JSON - ```sql - select * from s1 where info->'k1' = 'v1'; + ``` + select * from s1 where info->'k1' = 'v1' - select info->'k1' from s1; + select info->'k1' from s1 ``` 3. "contains" Operator of JSON - ```sql - select * from s1 where info contains 'k2'; + ``` + select * from s1 where info contains 'k2' - select * from s1 where info contains 'k1'; + select * from s1 where info contains 'k1' ``` ## Applicable Operations 1. When a JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is no null` can be used but `in` can't be used. - ```sql + ``` select * from s1 where info->'k1' match 'v*'; select * from s1 where info->'k1' like 'v%' and info contains 'k2'; select * from s1 where info is null; - select * from s1 where info->'k1' is not null; + select * from s1 where info->'k1' is not null ``` 2. A tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query; for example `group by json->'key'` 3. `Distinct` can be used with a tag of type JSON - ```sql - select distinct info->'k1' from s1; + ``` + select distinct info->'k1' from s1 ``` 4. Tag Operations The value of a JSON tag can be altered. Please note that the full JSON will be overriden when doing this. - The name of a JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed. + The name of a JSON tag can be altered. + + A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed. ## Other Restrictions @@ -64,19 +68,24 @@ title: JSON Type - JSON format: - - The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array. - - object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so. - - value can be int, double, string, bool or NULL, and it can't be an array. Nesting is not allowed which means that the value of a key can't be JSON. - - If one key occurs twice in JSON, only the first one is valid. - - Escape characters are not allowed in JSON. + - The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array. + - object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so. + - value can be int, double, string, bool or NULL, and it can't be an array. Nesting is not allowed which means that the value of a key can't be JSON. + - If one key occurs twice in JSON, only the first one is valid. + - Escape characters are not allowed in JSON. - NULL is returned when querying a key that doesn't exist in JSON. - If a tag of JSON is the result of inner query, it can't be parsed and queried in the outer query. -For example, the SQL statements below are not supported. + For example, the SQL statements below are not supported. -```sql; -select jtag->'key' from (select jtag from STable); -select jtag->'key' from (select jtag from STable) where jtag->'key'>0; -``` + ``` + select jtag->'key' from (select jtag from stable) + ``` + + and + + ``` + select jtag->'key' from (select jtag from stable) where jtag->'key'>0 + ``` diff --git a/docs/en/12-taos-sql/18-escape.md b/docs/en/12-taos-sql/18-escape.md index 34ce9f7848a9d60811a23286a6675e8afa4f04fe..a2ae40de98be677e599e83a634952a39faeaafbf 100644 --- a/docs/en/12-taos-sql/18-escape.md +++ b/docs/en/12-taos-sql/18-escape.md @@ -2,7 +2,7 @@ title: Escape Characters --- -Below table is the list of escape characters used in TDengine. +## Escape Characters | Escape Character | **Actual Meaning** | | :--------------: | ------------------------ | @@ -15,11 +15,6 @@ Below table is the list of escape characters used in TDengine. | `\%` | % see below for details | | `\_` | \_ see below for details | -:::note -Escape characters are available from version 2.4.0.4 . - -::: - ## Restrictions 1. If there are escape characters in identifiers (database name, table name, column name) diff --git a/docs/en/12-taos-sql/19-limit.md b/docs/en/12-taos-sql/19-limit.md index ff552fc9771f5b428554acc62e9aeac03a305ecc..678c38a22ea763187cd0c87dceae3bf6ca03957c 100644 --- a/docs/en/12-taos-sql/19-limit.md +++ b/docs/en/12-taos-sql/19-limit.md @@ -1,59 +1,59 @@ --- -sidebar_label: 命名与边界限制 -title: 命名与边界限制 +sidebar_label: Name and Size Limits +title: Name and Size Limits --- -## 名称命名规则 +## Naming Rules -1. 合法字符:英文字符、数字和下划线 -2. 允许英文字符或下划线开头,不允许以数字开头 -3. 不区分大小写 -4. 转义后表(列)名规则: - 为了兼容支持更多形式的表(列)名,TDengine 引入新的转义符 "`"。可用让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查 - 转义后的表(列)名同样受到长度限制要求,且长度计算的时候不计算转义符。使用转义字符以后,不再对转义字符中的内容进行大小写统一 +1. Names can include letters, digits, and underscores (_). +2. Names can begin with letters or underscores (_) but not with digits. +3. Names are not case-sensitive. +4. Rules for names with escape characters are as follows: + You can escape a name by enclosing it in backticks (`). In this way, you can reuse keyword names for table names. However, the first three naming rules no longer apply. + Table and column names that are enclosed in escape characters are still subject to length limits. When the length of such a name is calculated, the escape characters are not included. Names specified using escape character are case-sensitive. - 例如:\`aBc\` 和 \`abc\` 是不同的表(列)名,但是 abc 和 aBc 是相同的表(列)名。 - 需要注意的是转义字符中的内容必须是可打印字符。 + For example, \`aBc\` and \`abc\` are different table or column names, but "abc" and "aBc" are same names because internally they are all "abc". + Only ASCII visible characters can be used with escape character. -## 密码合法字符集 +## Password Rules `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]` -去掉了 `` ‘“`\ `` (单双引号、撇号、反斜杠、空格) +The following characters cannot occur in a password: single quotation marks ('), double quotation marks ("), backticks (`), backslashes (\\), and spaces. -## 一般限制 +## General Limits -- 数据库名最大长度为 32 -- 表名最大长度为 192,不包括数据库名前缀和分隔符 -- 每行数据最大长度 48KB (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置) -- 列名最大长度为 64 -- 最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。 -- 标签名最大长度为 64 -- 最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16KB -- SQL 语句最大长度 1048576 个字符,也可通过客户端配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 -- SELECT 语句的查询结果,最多允许返回 4096 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错 -- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 -- 数据库的副本数只能设置为 1 或 3 -- 用户名的最大长度是 23 个字节 -- 用户密码的最大长度是 15 个字节 -- 总数据行数取决于可用资源 -- 单个数据库的虚拟结点数上限为 1024 +- Maximum length of database name is 32 bytes +- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator. +- Maximum length of each data row is 48K bytes. Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type. +- The maximum length of a column name is 64 bytes. +- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp. +- The maximum length of a tag name is 64 bytes +- Maximum number of tags is 128. There must be at least 1 tag. The total length of tag values cannot exceed 16 KB. +- Maximum length of single SQL statement is 1 MB (1048576 bytes). +- At most 4096 columns can be returned by `SELECT`. Functions in the query statement constitute columns. An error is returned if the limit is exceeded. +- Maximum numbers of databases, STables, tables are dependent only on the system resources. +- The number of replicas can only be 1 or 3. +- The maximum length of a username is 23 bytes. +- The maximum length of a password is 15 bytes. +- The maximum number of rows depends on system resources. +- The maximum number of vnodes in a database is 1024. -## 表(列)名合法性说明 +## Restrictions of Table/Column Names -### TDengine 中的表(列)名命名规则如下: +### Name Restrictions of Table/Column -只能由字母、数字、下划线构成,数字不能在首位,长度不能超过 192 字节,不区分大小写。这里表名称不包括数据库名的前缀和分隔符。 +The name of a table or column can only be composed of ASCII characters, digits and underscore and it cannot start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator. -### 转义后表(列)名规则: +### Name Restrictions After Escaping -为了兼容支持更多形式的表(列)名,TDengine 引入新的转义符 "`",可以避免表名与关键词的冲突,同时不受限于上述表名合法性约束检查,转义符不计入表名的长度。 -转义后的表(列)名同样受到长度限制要求,且长度计算的时候不计算转义符。使用转义字符以后,不再对转义字符中的内容进行大小写统一。 +To support more flexible table or column names, new escape character "\`" is introduced in TDengine to avoid the conflict between table name and keywords and break the above restrictions for table names. The escape character is not counted in the length of table name. +With escaping, the string inside escape characters are case sensitive, i.e. will not be converted to lower case internally. The table names specified using escape character are case sensitive. -例如: -\`aBc\` 和 \`abc\` 是不同的表(列)名,但是 abc 和 aBc 是相同的表(列)名。 +For example: +\`aBc\` and \`abc\` are different table or column names, but "abc" and "aBc" are same names because internally they are all "abc". :::note -转义字符中的内容必须是可打印字符。 +The characters inside escape characters must be printable characters. ::: diff --git a/docs/en/12-taos-sql/20-keywords.md b/docs/en/12-taos-sql/20-keywords.md index 6d40deb5a696141cbd7bf8dd01bba6a251ef8908..6f166c8034382b0613845d18470556622106e673 100644 --- a/docs/en/12-taos-sql/20-keywords.md +++ b/docs/en/12-taos-sql/20-keywords.md @@ -1,10 +1,11 @@ --- -title: Keywords +sidebar_label: Reserved Keywords +title: Reserved Keywords --- -There are about 200 keywords reserved by TDengine, they can't be used as the name of database, STable or table with either upper case, lower case or mixed case. +## Keyword List -## Keywords List +There are about 200 keywords reserved by TDengine, they can't be used as the name of database, STable or table with either upper case, lower case or mixed case. The following list shows all reserved keywords: ### A @@ -57,70 +58,70 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam ### D -- DATABASE -- DATABASES -- DAYS -- DBS -- DEFERRED +- DATABASE +- DATABASES +- DAYS +- DBS +- DEFERRED - DELETE - DELIMITERS -- DESC -- DESCRIBE -- DETACH -- DISTINCT -- DIVIDE -- DNODE -- DNODES -- DOT -- DOUBLE -- DROP +- DESC +- DESCRIBE +- DETACH +- DISTINCT +- DIVIDE +- DNODE +- DNODES +- DOT +- DOUBLE +- DROP ### E -- END -- EQ -- EXISTS -- EXPLAIN +- END +- EQ +- EXISTS +- EXPLAIN ### F -- FAIL -- FILE -- FILL -- FLOAT -- FOR -- FROM -- FSYNC +- FAIL +- FILE +- FILL +- FLOAT +- FOR +- FROM +- FSYNC ### G -- GE -- GLOB +- GE +- GLOB - GRANTS -- GROUP -- GT +- GROUP +- GT ### H -- HAVING +- HAVING ### I - ID - IF -- IGNORE +- IGNORE - IMMEDIA -- IMPORT -- IN +- IMPORT +- IN - INITIAL -- INSERT +- INSERT - INSTEAD -- INT +- INT - INTEGER - INTERVA -- INTO -- IS -- ISNULL +- INTO +- IS +- ISNULL ### J @@ -129,187 +130,147 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam ### K - KEEP -- KEY +- KEY - KILL ### L -- LE -- LIKE -- LIMIT +- LE +- LIKE +- LIMIT - LINEAR -- LOCAL -- LP +- LOCAL +- LP - LSHIFT -- LT +- LT ### M -- MATCH -- MAXROWS -- MINROWS -- MINUS -- MNODES -- MODIFY -- MODULES +- MATCH +- MAXROWS +- MINROWS +- MINUS +- MNODES +- MODIFY +- MODULES ### N -- NE -- NONE -- NOT +- NE +- NONE +- NOT - NOTNULL -- NOW +- NOW - NULL ### O -- OF +- OF - OFFSET -- OR -- ORDER +- OR +- ORDER ### P - PARTITION -- PASS -- PLUS -- PPS +- PASS +- PLUS +- PPS - PRECISION -- PREV +- PREV - PRIVILEGE ### Q -- QTIME +- QTIME - QUERIE -- QUERY +- QUERY - QUORUM ### R -- RAISE -- REM +- RAISE +- REM - REPLACE - REPLICA -- RESET +- RESET - RESTRIC -- ROW -- RP +- ROW +- RP - RSHIFT ### S -- SCORES -- SELECT -- SEMI +- SCORES +- SELECT +- SEMI - SESSION -- SET -- SHOW -- SLASH +- SET +- SHOW +- SLASH - SLIDING -- SLIMIT +- SLIMIT - SMALLIN - SOFFSET -- STable +- STable - STableS -- STAR -- STATE +- STAR +- STATE - STATEMEN - STATE_WI -- STORAGE -- STREAM -- STREAMS -- STRING -- SYNCDB +- STORAGE +- STREAM +- STREAMS +- STRING +- SYNCDB ### T -- TABLE -- TABLES -- TAG -- TAGS -- TBNAME -- TIMES -- TIMESTAMP -- TINYINT -- TOPIC -- TOPICS -- TRIGGER -- TSERIES +- TABLE +- TABLES +- TAG +- TAGS +- TBNAME +- TIMES +- TIMESTAMP +- TINYINT +- TOPIC +- TOPICS +- TRIGGER +- TSERIES ### U -- UMINUS -- UNION -- UNSIGNED -- UPDATE -- UPLUS -- USE -- USER -- USERS -- USING +- UMINUS +- UNION +- UNSIGNED +- UPDATE +- UPLUS +- USE +- USER +- USERS +- USING ### V -- VALUES -- VARIABLE +- VALUES +- VARIABLE - VARIABLES -- VGROUPS -- VIEW -- VNODES +- VGROUPS +- VIEW +- VNODES ### W - WAL - WHERE -### _ - -- _C0 -- _QSTART -- _QSTOP -- _QDURATION -- _WSTART -- _WSTOP -- _WDURATION - -## Explanations -### TBNAME -`TBNAME` can be considered as a special tag, which represents the name of the subtable, in a STable. - -Get the table name and tag values of all subtables in a STable. -```mysql -SELECT TBNAME, location FROM meters; -``` - -Count the number of subtables in a STable. -```mysql -SELECT COUNT(TBNAME) FROM meters; -``` - -Only filter on TAGS can be used in WHERE clause in the above two query statements. -```mysql -taos> SELECT TBNAME, location FROM meters; - tbname | location | -================================================================== - d1004 | California.SanFrancisco | - d1003 | California.SanFrancisco | - d1002 | California.LosAngeles | - d1001 | California.LosAngeles | -Query OK, 4 row(s) in set (0.000881s) - -taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; - count(tbname) | -======================== - 2 | -Query OK, 1 row(s) in set (0.001091s) -``` -### _QSTART/_QSTOP/_QDURATION -The start, stop and duration of a query time window. - -### _WSTART/_WSTOP/_WDURATION -The start, stop and duration of aggegate query by time window, like interval, session window, state window. - -### _c0/_ROWTS -_c0 is equal to _ROWTS, it means the first column of a table or STable. +### \_ + +- \_C0 +- \_QSTART +- \_QSTOP +- \_QDURATION +- \_WSTART +- \_WSTOP +- \_WDURATION diff --git a/docs/en/12-taos-sql/21-node.md b/docs/en/12-taos-sql/21-node.md index 4816daf42042c0607aebf37c8b57961e5b1927fe..8bb895f73cd26edd1ec2ddabf08b842ceecf76fe 100644 --- a/docs/en/12-taos-sql/21-node.md +++ b/docs/en/12-taos-sql/21-node.md @@ -1,37 +1,37 @@ --- -sidebar_label: 集群管理 -title: 集群管理 +sidebar_label: Cluster +title: Cluster --- -组成 TDengine 集群的物理实体是 dnode (data node 的缩写),它是一个运行在操作系统之上的进程。在 dnode 中可以建立负责时序数据存储的 vnode (virtual node),在多节点集群环境下当某个数据库的 replica 为 3 时,该数据库中的每个 vgroup 由 3 个 vnode 组成;当数据库的 replica 为 1 时,该数据库中的每个 vgroup 由 1 个 vnode 组成。如果要想配置某个数据库为多副本,则集群中的 dnode 数量至少为 3。在 dnode 还可以创建 mnode (management node),单个集群中最多可以创建三个 mnode。在 TDengine 3.0.0.0 中为了支持存算分离,引入了一种新的逻辑节点 qnode (query node),qnode 和 vnode 既可以共存在一个 dnode 中,也可以完全分离在不同的 dnode 上。 +The physical entities that form TDengine clusters are known as data nodes (dnodes). Each dnode is a process running on the operating system of the physical machine. Dnodes can contain virtual nodes (vnodes), which store time-series data. Virtual nodes are formed into vgroups, which have 1 or 3 vnodes depending on the replica setting. If you want to enable replication on your cluster, it must contain at least three nodes. Dnodes can also contain management nodes (mnodes). Each cluster has up to three mnodes. Finally, dnodes can contain query nodes (qnodes), which compute time-series data, thus separating compute from storage. A single dnode can contain a vnode, qnode, and mnode. -## 创建数据节点 +## Create a Dnode ```sql CREATE DNODE {dnode_endpoint | dnode_host_name PORT port_val} ``` -其中 `dnode_endpoint` 是形成 `hostname:port`的格式。也可以分开指定 hostname 和 port。 +Enter the dnode_endpoint in hostname:port format. You can also specify the hostname and port as separate parameters. -实际操作中推荐先创建 dnode,再启动相应的 dnode 进程,这样该 dnode 就可以立即根据其配置文件中的 firstEP 加入集群。每个 dnode 在加入成功后都会被分配一个 ID。 +Create the dnode before starting the corresponding dnode process. The dnode can then join the cluster based on the value of the firstEp parameter. Each dnode is assigned an ID after it joins a cluster. -## 查看数据节点 +## View Dnodes ```sql SHOW DNODES; ``` -可以列出集群中所有的数据节点,所列出的字段有 dnode 的 ID, endpoint, status。 +The preceding SQL command shows all dnodes in the cluster with the ID, endpoint, and status. -## 删除数据节点 +## Delete a DNODE ```sql DROP DNODE {dnode_id | dnode_endpoint} ``` -可以用 dnoe_id 或 endpoint 两种方式从集群中删除一个 dnode。注意删除 dnode 不等于停止相应的进程。实际中推荐先将一个 dnode 删除之后再停止其所对应的进程。 +You can delete a dnode by its ID or by its endpoint. Note that deleting a dnode does not stop its process. You must stop the process after the dnode is deleted. -## 修改数据节点配置 +## Modify Dnode Configuration ```sql ALTER DNODE dnode_id dnode_option @@ -62,59 +62,59 @@ dnode_option: { } ``` -上面语法中的这些可修改配置项其配置方式与 dnode 配置文件中的配置方式相同,区别是修改是动态的立即生效,且不需要重启 dnode。 +The parameters that you can modify through this statement are the same as those located in the dnode configuration file. Modifications that you make through this statement take effect immediately, while modifications to the configuration file take effect when the dnode restarts. -## 添加管理节点 +## Add an Mnode ```sql CREATE MNODE ON DNODE dnode_id ``` -系统启动默认在 firstEP 节点上创建一个 MNODE,用户可以使用此语句创建更多的 MNODE 来提高系统可用性。一个集群最多存在三个 MNODE,一个 DNODE 上只能创建一个 MNODE。 +TDengine automatically creates an mnode on the firstEp node. You can use this statement to create more mnodes for higher system availability. A cluster can have a maximum of three mnodes. Each dnode can contain only one mnode. -## 查看管理节点 +## View Mnodes ```sql SHOW MNODES; ``` -列出集群中所有的管理节点,包括其 ID,所在 DNODE 以及状态。 +This statement shows all mnodes in the cluster with the ID, dnode, and status. -## 删除管理节点 +## Delete an Mnode ```sql DROP MNODE ON DNODE dnode_id; ``` -删除 dnode_id 所指定的 DNODE 上的 MNODE。 +This statement deletes the mnode located on the specified dnode. -## 创建查询节点 +## Create a Qnode ```sql CREATE QNODE ON DNODE dnode_id; ``` -系统启动默认没有 QNODE,用户可以创建 QNODE 来实现计算和存储的分离。一个 DNODE 上只能创建一个 QNODE。一个 DNODE 的 `supportVnodes` 参数如果不为 0,同时又在其上创建上 QNODE,则在该 dnode 中既有负责存储管理的 vnode 又有负责查询计算的 qnode,如果还在该 dnode 上创建了 mnode,则一个 dnode 上最多三种逻辑节点都可以存在。但通过配置也可以使其彻底分离。将一个 dnode 的`supportVnodes`配置为 0,可以选择在其上创建 mnode 或者 qnode 中的一种,这样可以实现三种逻辑节点在物理上的彻底分离。 +TDengine does not automatically create qnodes on startup. You can create qnodes as necessary for compute/storage separation. Each dnode can contain only one qnode. If a qnode is created on a dnode whose supportVnodes parameter is not 0, a vnode and qnode may coexist on the dnode. Each dnode can have a maximum of one vnode, one qnode, and one mnode. However, you can configure your cluster so that vnodes, qnodes, and mnodes are located on separate dnodes. If you set supportVnodes to 0 for a dnode, you can then decide whether to deploy an mnode or a qnode on it. In this way you can physically separate virtual node types. -## 查看查询节点 +## View Qnodes ```sql SHOW QNODES; ``` -列出集群中所有查询节点,包括 ID,及所在 DNODE。 +This statement shows all qnodes in the cluster with the ID and dnode. -## 删除查询节点 +## Delete a Qnode ```sql DROP QNODE ON DNODE dnode_id; ``` -删除 ID 为 dnode_id 的 DNODE 上的 QNODE,但并不会影响该 dnode 的状态。 +This statement deletes the mnode located on the specified dnode. This does not affect the status of the dnode. -## 修改客户端配置 +## Modify Client Configuration -如果将客户端也看作广义的集群的一部分,可以通过如下命令动态修改客户端配置参数。 +The client configuration can also be modified in a similar way to other cluster components. ```sql ALTER LOCAL local_option @@ -129,26 +129,26 @@ local_option: { } ``` -上面语法中的参数与在配置文件中配置客户端的用法相同,但不需要重启客户端,修改后立即生效。 +The parameters that you can modify through this statement are the same as those located in the client configuration file. Modifications that you make through this statement take effect immediately, while modifications to the configuration file take effect when the client restarts. -## 查看客户端配置 +## View Client Configuration ```sql SHOW LOCAL VARIABLES; ``` -## 合并 vgroup +## Combine Vgroups ```sql MERGE VGROUP vgroup_no1 vgroup_no2; ``` -如果在系统实际运行一段时间后,因为不同时间线的数据特征不同导致在 vgroups 之间的数据和负载分布不均衡,可以通过合并或拆分 vgroups 的方式逐步实现负载均衡。 +If load and data are not properly balanced among vgroups due to the data in different tim lines having different characteristics, you can combine or separate vgroups. -## 拆分 vgroup +## Separate Vgroups ```sql SPLIT VGROUP vgroup_no; ``` -会创建一个新的 vgroup,并将指定 vgroup 中的数据按照一致性 HASH 迁移一部分到新的 vgroup 中。此过程中,原 vgroup 可以正常提供读写服务。 +This statement creates a new vgroup and migrates part of the data from the original vgroup to the new vgroup with consistent hashing. During this process, the original vgroup can continue to provide services normally. diff --git a/docs/en/12-taos-sql/22-meta.md b/docs/en/12-taos-sql/22-meta.md index 1e178706859a3e5fa5dbabc00777b92639d76617..9bda5a0a1027243ea5f50c55e303fdb7155c853b 100644 --- a/docs/en/12-taos-sql/22-meta.md +++ b/docs/en/12-taos-sql/22-meta.md @@ -1,247 +1,279 @@ --- -sidebar_label: 元数据库 -title: 元数据库 +sidebar_label: Metadata +title: Information_Schema Database --- -TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数据库元数据、数据库系统信息和状态的访问,例如数据库或表的名称,当前执行的 SQL 语句等。该数据库存储有关 TDengine 维护的所有其他数据库的信息。它包含多个只读表。实际上,这些表都是视图,而不是基表,因此没有与它们关联的文件。所以对这些表只能查询,不能进行 INSERT 等写入操作。`INFORMATION_SCHEMA` 数据库旨在以一种更一致的方式来提供对 TDengine 支持的各种 SHOW 语句(如 SHOW TABLES、SHOW DATABASES)所提供的信息的访问。与 SHOW 语句相比,使用 SELECT ... FROM INFORMATION_SCHEMA.tablename 具有以下优点: +TDengine includes a built-in database named `INFORMATION_SCHEMA` to provide access to database metadata, system information, and status information. This information includes database names, table names, and currently running SQL statements. All information related to TDengine maintenance is stored in this database. It contains several read-only tables. These tables are more accurately described as views, and they do not correspond to specific files. You can query these tables but cannot write data to them. The INFORMATION_SCHEMA database is intended to provide a unified method for SHOW commands to access data. However, using SELECT ... FROM INFORMATION_SCHEMA.tablename offers several advantages over SHOW commands: -1. 可以使用 USE 语句将 INFORMATION_SCHEMA 设为默认数据库 -2. 可以使用 SELECT 语句熟悉的语法,只需要学习一些表名和列名 -3. 可以对查询结果进行筛选、排序等操作。事实上,可以使用任意 TDengine 支持的 SELECT 语句对 INFORMATION_SCHEMA 中的表进行查询 -4. TDengine 在后续演进中可以灵活的添加已有 INFORMATION_SCHEMA 中表的列,而不用担心对既有业务系统造成影响 -5. 与其他数据库系统更具互操作性。例如,Oracle 数据库用户熟悉查询 Oracle 数据字典中的表 +1. You can use a USE statement to specify the INFORMATION_SCHEMA database as the current database. +2. You can use the familiar SELECT syntax to access information, provided that you know the table and column names. +3. You can filter and order the query results. More generally, you can use any SELECT syntax that TDengine supports to query the INFORMATION_SCHEMA database. +4. Future versions of TDengine can add new columns to INFORMATION_SCHEMA tables without affecting existing business systems. +5. It is easier for users coming from other database management systems. For example, Oracle users can query data dictionary tables. -Note: 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们仍然被保留。 +Note: SHOW statements are still supported for the convenience of existing users. -本章将详细介绍 `INFORMATION_SCHEMA` 这个内置元数据库中的表和表结构。 +This document introduces the tables of INFORMATION_SCHEMA and their structure. ## INS_DNODES -提供 dnode 的相关信息。也可以使用 SHOW DNODES 来查询这些信息。 +Provides information about dnodes. Similar to SHOW DNODES. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :------------: | ------------ | ------------------------- | -| 1 | vnodes | SMALLINT | dnode 中的实际 vnode 个数 | -| 2 | support_vnodes | SMALLINT | 最多支持的 vnode 个数 | -| 3 | status | BINARY(10) | 当前状态 | -| 4 | note | BINARY(256) | 离线原因等信息 | -| 5 | id | SMALLINT | dnode id | -| 6 | endpoint | BINARY(134) | dnode 的地址 | -| 7 | create | TIMESTAMP | 创建时间 | +| 1 | vnodes | SMALLINT | Current number of vnodes on the dnode | +| 2 | vnodes | SMALLINT | Maximum number of vnodes on the dnode | +| 3 | status | BINARY(10) | Current status | +| 4 | note | BINARY(256) | Reason for going offline or other information | +| 5 | id | SMALLINT | Dnode ID | +| 6 | endpoint | BINARY(134) | Dnode endpoint | +| 7 | create | TIMESTAMP | Creation time | ## INS_MNODES -提供 mnode 的相关信息。也可以使用 SHOW MNODES 来查询这些信息。 +Provides information about mnodes. Similar to SHOW MNODES. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------ | ------------------ | -| 1 | id | SMALLINT | mnode id | -| 2 | endpoint | BINARY(134) | mnode 的地址 | -| 3 | role | BINARY(10) | 当前角色 | -| 4 | role_time | TIMESTAMP | 成为当前角色的时间 | -| 5 | create_time | TIMESTAMP | 创建时间 | +| 1 | id | SMALLINT | Mnode ID | +| 2 | endpoint | BINARY(134) | Mnode endpoint | +| 3 | role | BINARY(10) | Current role | +| 4 | role_time | TIMESTAMP | Time at which the current role was assumed | +| 5 | create_time | TIMESTAMP | Creation time | ## INS_MODULES -提供组件的相关信息。也可以使用 SHOW MODULES 来查询这些信息 +Provides information about modules. Similar to SHOW MODULES. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :------: | ------------ | ---------- | -| 1 | id | SMALLINT | module id | -| 2 | endpoint | BINARY(134) | 组件的地址 | -| 3 | module | BINARY(10) | 组件状态 | +| 1 | id | SMALLINT | Module ID | +| 2 | endpoint | BINARY(134) | Module endpoint | +| 3 | module | BINARY(10) | Module status | ## INS_QNODES -当前系统中 QNODE 的信息。也可以使用 SHOW QNODES 来查询这些信息。 +Provides information about qnodes. Similar to SHOW QNODES. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------ | ------------ | -| 1 | id | SMALLINT | qnode id | -| 2 | endpoint | BINARY(134) | qnode 的地址 | -| 3 | create_time | TIMESTAMP | 创建时间 | +| 1 | id | SMALLINT | Qnode ID | +| 2 | endpoint | BINARY(134) | Qnode endpoint | +| 3 | create_time | TIMESTAMP | Creation time | ## INS_CLUSTER -存储集群相关信息。 +Provides information about the cluster. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------ | ---------- | -| 1 | id | BIGINT | cluster id | -| 2 | name | BINARY(134) | 集群名称 | -| 3 | create_time | TIMESTAMP | 创建时间 | +| 1 | id | BIGINT | Cluster ID | +| 2 | name | BINARY(134) | Cluster name | +| 3 | create_time | TIMESTAMP | Creation time | ## INS_DATABASES -提供用户创建的数据库对象的相关信息。也可以使用 SHOW DATABASES 来查询这些信息。 +Provides information about user-created databases. Similar to SHOW DATABASES. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :------------------: | ---------------- | ------------------------------------------------ | -| 1 | name | BINARY(32) | 数据库名 | -| 2 | create_time | TIMESTAMP | 创建时间 | -| 3 | ntables | INT | 数据库中表的数量,包含子表和普通表但不包含超级表 | -| 4 | vgroups | INT | 数据库中有多少个 vgroup | -| 6 | replica | INT | 副本数 | -| 7 | quorum | BINARY(3) | 强一致性 | -| 8 | duration | INT | 单文件存储数据的时间跨度 | -| 9 | keep | INT | 数据保留时长 | -| 10 | buffer | INT | 每个 vnode 写缓存的内存块大小,单位 MB | -| 11 | pagesize | INT | 每个 VNODE 中元数据存储引擎的页大小,单位为 KB | -| 12 | pages | INT | 每个 vnode 元数据存储引擎的缓存页个数 | -| 13 | minrows | INT | 文件块中记录的最大条数 | -| 14 | maxrows | INT | 文件块中记录的最小条数 | -| 15 | comp | INT | 数据压缩方式 | -| 16 | precision | BINARY(2) | 时间分辨率 | -| 17 | status | BINARY(10) | 数据库状态 | -| 18 | retention | BINARY (60) | 数据的聚合周期和保存时长 | -| 19 | single_stable | BOOL | 表示此数据库中是否只可以创建一个超级表 | -| 20 | cachemodel | BINARY(60) | 表示是否在内存中缓存子表的最近数据 | -| 21 | cachesize | INT | 表示每个 vnode 中用于缓存子表最近数据的内存大小 | -| 22 | wal_level | INT | WAL 级别 | -| 23 | wal_fsync_period | INT | 数据落盘周期 | -| 24 | wal_retention_period | INT | WAL 的保存时长 | -| 25 | wal_retention_size | INT | WAL 的保存上限 | -| 26 | wal_roll_period | INT | wal 文件切换时长 | -| 27 | wal_segment_size | wal 单个文件大小 | +| 1| name| BINARY(32)| Database name | +| 2 | create_time | TIMESTAMP | Creation time | +| 3 | ntables | INT | Number of standard tables and subtables (not including supertables) | +| 4 | vgroups | INT | Number of vgroups | +| 6 | replica | INT | Number of replicas | +| 7 | quorum | BINARY(3) | Strong consistency | +| 8 | duration | INT | Duration for storage of single files | +| 9 | keep | INT | Data retention period | +| 10 | buffer | INT | Write cache size per vnode, in MB | +| 11 | pagesize | INT | Page size for vnode metadata storage engine, in KB | +| 12 | pages | INT | Number of pages per vnode metadata storage engine | +| 13 | minrows | INT | Maximum number of records per file block | +| 14 | maxrows | INT | Minimum number of records per file block | +| 15 | comp | INT | Compression method | +| 16 | precision | BINARY(2) | Time precision | +| 17 | status | BINARY(10) | Current database status | +| 18 | retention | BINARY (60) | Aggregation interval and retention period | +| 19 | single_stable | BOOL | Whether the database can contain multiple supertables | +| 20 | cachemodel | BINARY(60) | Caching method for the newest data | +| 21 | cachesize | INT | Memory per vnode used for caching the newest data | +| 22 | wal_level | INT | WAL level | +| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk | +| 24 | wal_retention_period | INT | WAL retention period | +| 25 | wal_retention_size | INT | Maximum WAL size | +| 26 | wal_roll_period | INT | WAL rotation period | +| 27 | wal_segment_size | WAL file size | ## INS_FUNCTIONS -用户创建的自定义函数的信息。 +Provides information about user-defined functions. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------ | -------------- | -| 1 | name | BINARY(64) | 函数名 | -| 2 | comment | BINARY(255) | 补充说明 | -| 3 | aggregate | INT | 是否为聚合函数 | -| 4 | output_type | BINARY(31) | 输出类型 | -| 5 | create_time | TIMESTAMP | 创建时间 | -| 6 | code_len | INT | 代码长度 | -| 7 | bufsize | INT | buffer 大小 | +| 1 | name | BINARY(64) | Function name | +| 2 | comment | BINARY(255) | Function description | +| 3 | aggregate | INT | Whether the UDF is an aggregate function | +| 4 | output_type | BINARY(31) | Output data type | +| 5 | create_time | TIMESTAMP | Creation time | +| 6 | code_len | INT | Length of the source code | +| 7 | bufsize | INT | Buffer size | ## INS_INDEXES -提供用户创建的索引的相关信息。也可以使用 SHOW INDEX 来查询这些信息。 +Provides information about user-created indices. Similar to SHOW INDEX. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :--------------: | ------------ | ---------------------------------------------------------------------------------- | -| 1 | db_name | BINARY(32) | 包含此索引的表所在的数据库名 | -| 2 | table_name | BINARY(192) | 包含此索引的表的名称 | -| 3 | index_name | BINARY(192) | 索引名 | -| 4 | column_name | BINARY(64) | 建索引的列的列名 | -| 5 | index_type | BINARY(10) | 目前有 SMA 和 FULLTEXT | -| 6 | index_extensions | BINARY(256) | 索引的额外信息。对 SMA 类型的索引,是函数名的列表。对 FULLTEXT 类型的索引为 NULL。 | +| 1 | db_name | BINARY(32) | Database containing the table with the specified index | +| 2 | table_name | BINARY(192) | Table containing the specified index | +| 3 | index_name | BINARY(192) | Index name | +| 4 | db_name | BINARY(64) | Index column | +| 5 | index_type | BINARY(10) | SMA or FULLTEXT index | +| 6 | index_extensions | BINARY(256) | Other information For SMA indices, this shows a list of functions. For FULLTEXT indices, this is null. | ## INS_STABLES -提供用户创建的超级表的相关信息。 +Provides information about supertables. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :-----------: | ------------ | ------------------------ | -| 1 | stable_name | BINARY(192) | 超级表表名 | -| 2 | db_name | BINARY(64) | 超级表所在的数据库的名称 | -| 3 | create_time | TIMESTAMP | 创建时间 | -| 4 | columns | INT | 列数目 | -| 5 | tags | INT | 标签数目 | -| 6 | last_update | TIMESTAMP | 最后更新时间 | -| 7 | table_comment | BINARY(1024) | 表注释 | -| 8 | watermark | BINARY(64) | 窗口的关闭时间 | -| 9 | max_delay | BINARY(64) | 推送计算结果的最大延迟 | -| 10 | rollup | BINARY(128) | rollup 聚合函数 | +| 1 | stable_name | BINARY(192) | Supertable name | +| 2 | db_name | BINARY(64) | All databases in the supertable | +| 3 | create_time | TIMESTAMP | Creation time | +| 4 | columns | INT | Number of columns | +| 5 | tags | INT | Number of tags | +| 6 | last_update | TIMESTAMP | Last updated time | +| 7 | table_comment | BINARY(1024) | Table description | +| 8 | watermark | BINARY(64) | Window closing time | +| 9 | max_delay | BINARY(64) | Maximum delay for pushing stream processing results | +| 10 | rollup | BINARY(128) | Rollup aggregate function | ## INS_TABLES -提供用户创建的普通表和子表的相关信息 +Provides information about standard tables and subtables. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :-----------: | ------------ | ---------------- | -| 1 | table_name | BINARY(192) | 表名 | -| 2 | db_name | BINARY(64) | 数据库名 | -| 3 | create_time | TIMESTAMP | 创建时间 | -| 4 | columns | INT | 列数目 | -| 5 | stable_name | BINARY(192) | 所属的超级表表名 | -| 6 | uid | BIGINT | 表 id | -| 7 | vgroup_id | INT | vgroup id | -| 8 | ttl | INT | 表的生命周期 | -| 9 | table_comment | BINARY(1024) | 表注释 | -| 10 | type | BINARY(20) | 表类型 | +| 1 | table_name | BINARY(192) | Table name | +| 2 | db_name | BINARY(64) | Database name | +| 3 | create_time | TIMESTAMP | Creation time | +| 4 | columns | INT | Number of columns | +| 5 | stable_name | BINARY(192) | Supertable name | +| 6 | uid | BIGINT | Table ID | +| 7 | vgroup_id | INT | Vgroup ID | +| 8 | ttl | INT | Table time-to-live | +| 9 | table_comment | BINARY(1024) | Table description | +| 10 | type | BINARY(20) | Table type | ## INS_TAGS -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------- | ---------------------- | -| 1 | table_name | BINARY(192) | 表名 | -| 2 | db_name | BINARY(64) | 该表所在的数据库的名称 | -| 3 | stable_name | BINARY(192) | 所属的超级表表名 | -| 4 | tag_name | BINARY(64) | tag 的名称 | -| 5 | tag_type | BINARY(64) | tag 的类型 | -| 6 | tag_value | BINARY(16384) | tag 的值 | +| 1 | table_name | BINARY(192) | Table name | +| 2 | db_name | BINARY(64) | Database name | +| 3 | stable_name | BINARY(192) | Supertable name | +| 4 | tag_name | BINARY(64) | Tag name | +| 5 | tag_type | BINARY(64) | Tag type | +| 6 | tag_value | BINARY(16384) | Tag value | ## INS_USERS -提供系统中创建的用户的相关信息。 +Provides information about TDengine users. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------ | -------- | -| 1 | user_name | BINARY(23) | 用户名 | -| 2 | privilege | BINARY(256) | 权限 | -| 3 | create_time | TIMESTAMP | 创建时间 | +| 1 | user_name | BINARY(23) | User name | +| 2 | privilege | BINARY(256) | User permissions | +| 3 | create_time | TIMESTAMP | Creation time | ## INS_GRANTS -提供企业版授权的相关信息。 +Provides information about TDengine Enterprise Edition permissions. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :---------: | ------------ | -------------------------------------------------- | -| 1 | version | BINARY(9) | 企业版授权说明:official(官方授权的)/trial(试用的) | -| 2 | cpu_cores | BINARY(9) | 授权使用的 CPU 核心数量 | -| 3 | dnodes | BINARY(10) | 授权使用的 dnode 节点数量 | -| 4 | streams | BINARY(10) | 授权创建的流数量 | -| 5 | users | BINARY(10) | 授权创建的用户数量 | -| 6 | accounts | BINARY(10) | 授权创建的帐户数量 | -| 7 | storage | BINARY(21) | 授权使用的存储空间大小 | -| 8 | connections | BINARY(21) | 授权使用的客户端连接数量 | -| 9 | databases | BINARY(11) | 授权使用的数据库数量 | -| 10 | speed | BINARY(9) | 授权使用的数据点每秒写入数量 | -| 11 | querytime | BINARY(9) | 授权使用的查询总时长 | -| 12 | timeseries | BINARY(21) | 授权使用的测点数量 | -| 13 | expired | BINARY(5) | 是否到期,true:到期,false:未到期 | -| 14 | expire_time | BINARY(19) | 试用期到期时间 | +| 1 | version | BINARY(9) | Whether the deployment is a licensed or trial version | +| 2 | cpu_cores | BINARY(9) | CPU cores included in license | +| 3 | dnodes | BINARY(10) | Dnodes included in license | +| 4 | streams | BINARY(10) | Streams included in license | +| 5 | users | BINARY(10) | Users included in license | +| 6 | streams | BINARY(10) | Accounts included in license | +| 7 | storage | BINARY(21) | Storage space included in license | +| 8 | connections | BINARY(21) | Client connections included in license | +| 9 | databases | BINARY(11) | Databases included in license | +| 10 | speed | BINARY(9) | Write speed specified in license (data points per second) | +| 11 | querytime | BINARY(9) | Total query time specified in license | +| 12 | timeseries | BINARY(21) | Number of metrics included in license | +| 13 | expired | BINARY(5) | Whether the license has expired | +| 14 | expire_time | BINARY(19) | When the trial period expires | ## INS_VGROUPS -系统中所有 vgroups 的信息。 +Provides information about vgroups. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :-------: | ------------ | ------------------------------------------------------ | -| 1 | vgroup_id | INT | vgroup id | -| 2 | db_name | BINARY(32) | 数据库名 | -| 3 | tables | INT | 此 vgroup 内有多少表 | -| 4 | status | BINARY(10) | 此 vgroup 的状态 | -| 5 | v1_dnode | INT | 第一个成员所在的 dnode 的 id | -| 6 | v1_status | BINARY(10) | 第一个成员的状态 | -| 7 | v2_dnode | INT | 第二个成员所在的 dnode 的 id | -| 8 | v2_status | BINARY(10) | 第二个成员的状态 | -| 9 | v3_dnode | INT | 第三个成员所在的 dnode 的 id | -| 10 | v3_status | BINARY(10) | 第三个成员的状态 | -| 11 | nfiles | INT | 此 vgroup 中数据/元数据文件的数量 | -| 12 | file_size | INT | 此 vgroup 中数据/元数据文件的大小 | -| 13 | tsma | TINYINT | 此 vgroup 是否专用于 Time-range-wise SMA,1: 是, 0: 否 | +| 1 | vgroup_id | INT | Vgroup ID | +| 2 | db_name | BINARY(32) | Database name | +| 3 | tables | INT | Tables in vgroup | +| 4 | status | BINARY(10) | Vgroup status | +| 5 | v1_dnode | INT | Dnode ID of first vgroup member | +| 6 | v1_status | BINARY(10) | Status of first vgroup member | +| 7 | v2_dnode | INT | Dnode ID of second vgroup member | +| 8 | v2_status | BINARY(10) | Status of second vgroup member | +| 9 | v3_dnode | INT | Dnode ID of third vgroup member | +| 10 | v3_status | BINARY(10) | Status of third vgroup member | +| 11 | nfiles | INT | Number of data and metadata files in the vgroup | +| 12 | file_size | INT | Size of the data and metadata files in the vgroup | +| 13 | tsma | TINYINT | Whether time-range-wise SMA is enabled. 1 means enabled; 0 means disabled. | ## INS_CONFIGS -系统配置参数。 +Provides system configuration information. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :------: | ------------ | ------------ | -| 1 | name | BINARY(32) | 配置项名称 | -| 2 | value | BINARY(64) | 该配置项的值 | +| 1 | name | BINARY(32) | Parameter | +| 2 | value | BINARY(64) | Value | ## INS_DNODE_VARIABLES -系统中每个 dnode 的配置参数。 +Provides dnode configuration information. -| # | **列名** | **数据类型** | **说明** | +| # | **Column** | **Data Type** | **Description** | | --- | :------: | ------------ | ------------ | -| 1 | dnode_id | INT | dnode 的 ID | -| 2 | name | BINARY(32) | 配置项名称 | -| 3 | value | BINARY(64) | 该配置项的值 | +| 1 | dnode_id | INT | Dnode ID | +| 2 | name | BINARY(32) | Parameter | +| 3 | value | BINARY(64) | Value | + +## INS_TOPICS + +| # | **Column** | **Data Type** | **Description** | +| --- | :---------: | ------------ | ------------------------------ | +| 1 | topic_name | BINARY(192) | Topic name | +| 2 | db_name | BINARY(64) | Database for the topic | +| 3 | create_time | TIMESTAMP | Creation time | +| 4 | sql | BINARY(1024) | SQL statement used to create the topic | + +## INS_SUBSCRIPTIONS + +| # | **Column** | **Data Type** | **Description** | +| --- | :------------: | ------------ | ------------------------ | +| 1 | topic_name | BINARY(204) | Subscribed topic | +| 2 | consumer_group | BINARY(193) | Subscribed consumer group | +| 3 | vgroup_id | INT | Vgroup ID for the consumer | +| 4 | consumer_id | BIGINT | Consumer ID | + +## INS_STREAMS + +| # | **Column** | **Data Type** | **Description** | +| --- | :----------: | ------------ | --------------------------------------- | +| 1 | stream_name | BINARY(64) | Stream name | +| 2 | create_time | TIMESTAMP | Creation time | +| 3 | sql | BINARY(1024) | SQL statement used to create the stream | +| 4 | status | BIANRY(20) | Current status | +| 5 | source_db | BINARY(64) | Source database | +| 6 | target_db | BIANRY(64) | Target database | +| 7 | target_table | BINARY(192) | Target table | +| 8 | watermark | BIGINT | Watermark (see stream processing documentation) | +| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) | diff --git a/docs/en/12-taos-sql/23-perf.md b/docs/en/12-taos-sql/23-perf.md new file mode 100644 index 0000000000000000000000000000000000000000..29cf3af6abfbbc06e42ae99c78f35f33a3c7c30a --- /dev/null +++ b/docs/en/12-taos-sql/23-perf.md @@ -0,0 +1,97 @@ +--- +sidebar_label: Statistics +title: Performance_Schema Database +--- + +TDengine includes a built-in database named `PERFORMANCE_SCHEMA` to provide access to database performance statistics. This document introduces the tables of PERFORMANCE_SCHEMA and their structure. + +## PERF_APP + +Provides information about clients (such as applications) that connect to the cluster. Similar to SHOW APPS. + +| # | **Column** | **Data Type** | **Description** | +| --- | :----------: | ------------ | ------------------------------- | +| 1 | app_id | UBIGINT | Client ID | +| 2 | ip | BINARY(16) | Client IP address | +| 3 | pid | INT | Client process | +| 4 | name | BINARY(24) | Client name | +| 5 | start_time | TIMESTAMP | Time when client was started | +| 6 | insert_req | UBIGINT | Insert requests | +| 7 | insert_row | UBIGINT | Rows inserted | +| 8 | insert_time | UBIGINT | Time spent processing insert requests in microseconds | +| 9 | insert_bytes | UBIGINT | Size of data inserted in byted | +| 10 | fetch_bytes | UBIGINT | Size of query results in bytes | +| 11 | query_time | UBIGINT | Time spend processing query requests | +| 12 | slow_query | UBIGINT | Number of slow queries (greater than or equal to 3 seconds) | +| 13 | total_req | UBIGINT | Total requests | +| 14 | current_req | UBIGINT | Requests currently being processed | +| 15 | last_access | TIMESTAMP | Last update time | + +## PERF_CONNECTIONS + +Provides information about connections to the database. Similar to SHOW CONNECTIONS. + +| # | **Column** | **Data Type** | **Description** | +| --- | :---------: | ------------ | -------------------------------------------------- | +| 1 | conn_id | INT | Connection ID | +| 2 | user | BINARY(24) | User name | +| 3 | app | BINARY(24) | Client name | +| 4 | pid | UINT | Client process ID on client device that initiated the connection | +| 5 | end_point | BINARY(128) | Client endpoint | +| 6 | login_time | TIMESTAMP | Login time | +| 7 | last_access | TIMESTAMP | Last update time | + +## PERF_QUERIES + +Provides information about SQL queries currently running. Similar to SHOW QUERIES. + +| # | **Column** | **Data Type** | **Description** | +| --- | :----------: | ------------ | ---------------------------- | +| 1 | kill_id | UBIGINT | ID used to stop the query | +| 2 | query_id | INT | Query ID | +| 3 | conn_id | UINT | Connection ID | +| 4 | app | BINARY(24) | Client name | +| 5 | pid | INT | Client process ID on client device | +| 6 | user | BINARY(24) | User name | +| 7 | end_point | BINARY(16) | Client endpoint | +| 8 | create_time | TIMESTAMP | Creation time | +| 9 | exec_usec | BIGINT | Elapsed time | +| 10 | stable_query | BOOL | Whether the query is on a supertable | +| 11 | sub_num | INT | Number of subqueries | +| 12 | sub_status | BINARY(1000) | Subquery status | +| 13 | sql | BINARY(1024) | SQL statement | + +## PERF_CONSUMERS + +| # | **Column** | **Data Type** | **Description** | +| --- | :------------: | ------------ | ----------------------------------------------------------- | +| 1 | consumer_id | BIGINT | Consumer ID | +| 2 | consumer_group | BINARY(192) | Consumer group | +| 3 | client_id | BINARY(192) | Client ID (user-defined) | +| 4 | status | BINARY(20) | Consumer status | +| 5 | topics | BINARY(204) | Subscribed topic. Returns one row for each topic. | +| 6 | up_time | TIMESTAMP | Time of first connection to TDengine Server | +| 7 | subscribe_time | TIMESTAMP | Time of first subscription | +| 8 | rebalance_time | TIMESTAMP | Time of first rebalance triggering | + +## PERF_TRANS + +| # | **Column** | **Data Type** | **Description** | +| --- | :--------------: | ------------ | -------------------------------------------------------------- | +| 1 | id | INT | ID of the transaction currently running | +| 2 | create_time | TIMESTAMP | Creation time | +| 3 | stage | BINARY(12) | Transaction stage (redoAction, undoAction, or commit) | +| 4 | db1 | BINARY(64) | First database having a conflict with the transaction | +| 5 | db2 | BINARY(64) | Second database having a conflict with the transaction | +| 6 | failed_times | INT | Times the transaction has failed | +| 7 | last_exec_time | TIMESTAMP | Previous time the transaction was run | +| 8 | last_action_info | BINARY(511) | Reason for failure on previous run | + +## PERF_SMAS + +| # | **Column** | **Data Type** | **Description** | +| --- | :---------: | ------------ | ------------------------------------------- | +| 1 | sma_name | BINARY(192) | Time-range-wise SMA name | +| 2 | create_time | TIMESTAMP | Creation time | +| 3 | stable_name | BINARY(192) | Supertable name | +| 4 | vgroup_id | INT | Dedicated vgroup name | diff --git a/docs/en/12-taos-sql/24-show.md b/docs/en/12-taos-sql/24-show.md index 781f94324c78e7975abde33803cffdb914da020c..c9adb0cf782d1da63a8f9654f6c89b02a60a7cb7 100644 --- a/docs/en/12-taos-sql/24-show.md +++ b/docs/en/12-taos-sql/24-show.md @@ -1,9 +1,9 @@ --- -sidebar_label: SHOW 命令 -title: 使用 SHOW 命令查看系统元数据 +sidebar_label: SHOW Statement +title: SHOW Statement for Metadata --- -除了使用 `select` 语句查询 `INFORMATION_SCHEMA` 数据库中的表获得系统中的各种元数据、系统信息和状态之外,也可以用 `SHOW` 命令来实现同样的目的。 +`SHOW` command can be used to get brief system information. To get details about metatadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`. ## SHOW ACCOUNTS @@ -11,9 +11,9 @@ title: 使用 SHOW 命令查看系统元数据 SHOW ACCOUNTS; ``` -显示当前系统中所有租户的信息。 +Shows information about tenants on the system. -注:企业版独有 +Note: TDengine Enterprise Edition only. ## SHOW APPS @@ -21,7 +21,7 @@ SHOW ACCOUNTS; SHOW APPS; ``` -显示接入集群的应用(客户端)信息。 +Shows all clients (such as applications) that connect to the cluster. ## SHOW BNODES @@ -29,7 +29,7 @@ SHOW APPS; SHOW BNODES; ``` -显示当前系统中存在的 BNODE (backup node, 即备份节点)的信息。 +Shows information about backup nodes (bnodes) in the system. ## SHOW CLUSTER @@ -37,7 +37,7 @@ SHOW BNODES; SHOW CLUSTER; ``` -显示当前集群的信息 +Shows information about the current cluster. ## SHOW CONNECTIONS @@ -45,7 +45,7 @@ SHOW CLUSTER; SHOW CONNECTIONS; ``` -显示当前系统中存在的连接的信息。 +Shows information about connections to the system. ## SHOW CONSUMERS @@ -53,7 +53,7 @@ SHOW CONNECTIONS; SHOW CONSUMERS; ``` -显示当前数据库下所有活跃的消费者的信息。 +Shows information about all active consumers in the system. ## SHOW CREATE DATABASE @@ -61,7 +61,7 @@ SHOW CONSUMERS; SHOW CREATE DATABASE db_name; ``` -显示 db_name 指定的数据库的创建语句。 +Shows the SQL statement used to create the specified database. ## SHOW CREATE STABLE @@ -69,7 +69,7 @@ SHOW CREATE DATABASE db_name; SHOW CREATE STABLE [db_name.]stb_name; ``` -显示 tb_name 指定的超级表的创建语句 +Shows the SQL statement used to create the specified supertable. ## SHOW CREATE TABLE @@ -77,7 +77,7 @@ SHOW CREATE STABLE [db_name.]stb_name; SHOW CREATE TABLE [db_name.]tb_name ``` -显示 tb_name 指定的表的创建语句。支持普通表、超级表和子表。 +Shows the SQL statement used to create the specified table. This statement can be used on supertables, standard tables, and subtables. ## SHOW DATABASES @@ -85,7 +85,7 @@ SHOW CREATE TABLE [db_name.]tb_name SHOW DATABASES; ``` -显示用户定义的所有数据库。 +Shows all user-created databases. ## SHOW DNODES @@ -93,7 +93,7 @@ SHOW DATABASES; SHOW DNODES; ``` -显示当前系统中 DNODE 的信息。 +Shows all dnodes in the system. ## SHOW FUNCTIONS @@ -101,7 +101,7 @@ SHOW DNODES; SHOW FUNCTIONS; ``` -显示用户定义的自定义函数。 +Shows all user-defined functions in the system. ## SHOW LICENSE @@ -110,9 +110,9 @@ SHOW LICENSE; SHOW GRANTS; ``` -显示企业版许可授权的信息。 +Shows information about the TDengine Enterprise Edition license. -注:企业版独有 +Note: TDengine Enterprise Edition only. ## SHOW INDEXES @@ -120,7 +120,7 @@ SHOW GRANTS; SHOW INDEXES FROM tbl_name [FROM db_name]; ``` -显示已创建的索引。 +Shows indices that have been created. ## SHOW LOCAL VARIABLES @@ -128,7 +128,7 @@ SHOW INDEXES FROM tbl_name [FROM db_name]; SHOW LOCAL VARIABLES; ``` -显示当前客户端配置参数的运行值。 +Shows the working configuration of the client. ## SHOW MNODES @@ -136,7 +136,7 @@ SHOW LOCAL VARIABLES; SHOW MNODES; ``` -显示当前系统中 MNODE 的信息。 +Shows information about mnodes in the system. ## SHOW MODULES @@ -144,7 +144,7 @@ SHOW MNODES; SHOW MODULES; ``` -显示当前系统中所安装的组件的信息。 +Shows information about modules installed in the system. ## SHOW QNODES @@ -152,7 +152,7 @@ SHOW MODULES; SHOW QNODES; ``` -显示当前系统中 QNODE (查询节点)的信息。 +Shows information about qnodes in the system. ## SHOW SCORES @@ -160,9 +160,9 @@ SHOW QNODES; SHOW SCORES; ``` -显示系统被许可授权的容量的信息。 +Shows information about the storage space allowed by the license. -注:企业版独有 +Note: TDengine Enterprise Edition only. ## SHOW SNODES @@ -170,7 +170,7 @@ SHOW SCORES; SHOW SNODES; ``` -显示当前系统中 SNODE (流计算节点)的信息。 +Shows information about stream processing nodes (snodes) in the system. ## SHOW STABLES @@ -178,7 +178,7 @@ SHOW SNODES; SHOW [db_name.]STABLES [LIKE 'pattern']; ``` -显示当前数据库下的所有超级表的信息。可以使用 LIKE 对表名进行模糊匹配。 +Shows all supertables in the current database. You can use LIKE for fuzzy matching. ## SHOW STREAMS @@ -186,7 +186,7 @@ SHOW [db_name.]STABLES [LIKE 'pattern']; SHOW STREAMS; ``` -显示当前系统内所有流计算的信息。 +Shows information about streams in the system. ## SHOW SUBSCRIPTIONS @@ -194,7 +194,7 @@ SHOW STREAMS; SHOW SUBSCRIPTIONS; ``` -显示当前数据库下的所有的订阅关系 +Shows all subscriptions in the system. ## SHOW TABLES @@ -202,7 +202,7 @@ SHOW SUBSCRIPTIONS; SHOW [db_name.]TABLES [LIKE 'pattern']; ``` -显示当前数据库下的所有普通表和子表的信息。可以使用 LIKE 对表名进行模糊匹配。 +Shows all standard tables and subtables in the current database. You can use LIKE for fuzzy matching. ## SHOW TABLE DISTRIBUTED @@ -210,7 +210,7 @@ SHOW [db_name.]TABLES [LIKE 'pattern']; SHOW TABLE DISTRIBUTED table_name; ``` -显示表的数据分布信息。 +Shows how table data is distributed. ## SHOW TAGS @@ -218,7 +218,7 @@ SHOW TABLE DISTRIBUTED table_name; SHOW TAGS FROM child_table_name [FROM db_name]; ``` -显示子表的标签信息。 +Shows all tag information in a subtable. ## SHOW TOPICS @@ -226,7 +226,7 @@ SHOW TAGS FROM child_table_name [FROM db_name]; SHOW TOPICS; ``` -显示当前数据库下的所有主题的信息。 +Shows all topics in the current database. ## SHOW TRANSACTIONS @@ -234,7 +234,7 @@ SHOW TOPICS; SHOW TRANSACTIONS; ``` -显示当前系统中正在执行的事务的信息 +Shows all running transactions in the system. ## SHOW USERS @@ -242,7 +242,7 @@ SHOW TRANSACTIONS; SHOW USERS; ``` -显示当前系统中所有用户的信息。包括用户自定义的用户和系统默认用户。 +Shows information about users on the system. This includes user-created users and system-defined users. ## SHOW VARIABLES @@ -251,7 +251,7 @@ SHOW VARIABLES; SHOW DNODE dnode_id VARIABLES; ``` -显示当前系统中各节点需要相同的配置参数的运行值,也可以指定 DNODE 来查看其的配置参数。 +Shows the working configuration of the parameters that must be the same on each node. You can also specify a dnode to show the working configuration for that node. ## SHOW VGROUPS @@ -259,7 +259,7 @@ SHOW DNODE dnode_id VARIABLES; SHOW [db_name.]VGROUPS; ``` -显示当前系统中所有 VGROUP 或某个 db 的 VGROUPS 的信息。 +Shows information about all vgroups in the system or about the vgroups for a specified database. ## SHOW VNODES @@ -267,4 +267,4 @@ SHOW [db_name.]VGROUPS; SHOW VNODES [dnode_name]; ``` -显示当前系统中所有 VNODE 或某个 DNODE 的 VNODE 的信息。 +Shows information about all vnodes in the system or about the vnodes for a specified dnode. diff --git a/docs/en/12-taos-sql/25-grant.md b/docs/en/12-taos-sql/25-grant.md index 0c290350cc155e975e5a817c991bebc74944cd04..b9a3fa2321c8d073845d0cf9157ce335c930e06f 100644 --- a/docs/en/12-taos-sql/25-grant.md +++ b/docs/en/12-taos-sql/25-grant.md @@ -1,29 +1,30 @@ --- -sidebar_label: 权限管理 -title: 权限管理 +sidebar_label: Access Control +title: User and Access Control +description: Manage user and user's permission --- -本节讲述如何在 TDengine 中进行权限管理的相关操作。 +This document describes how to manage permissions in TDengine. -## 创建用户 +## Create a User ```sql -CREATE USER use_name PASS password; +CREATE USER use_name PASS 'password'; ``` -创建用户。 +This statement creates a user account. -use_name最长为23字节。 +The maximum length of use_name is 23 bytes. -password最长为128字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。 +The maximum length of password is 128 bytes. The password can include leters, digits, and special characters excluding single quotation marks, double quotation marks, backticks, backslashes, and spaces. The password cannot be empty. -## 删除用户 +## Delete a User ```sql DROP USER user_name; ``` -## 修改用户信息 +## Modify User Information ```sql ALTER USER user_name alter_user_clause @@ -35,12 +36,12 @@ alter_user_clause: { } ``` -- PASS:修改用户密码。 -- ENABLE:修改用户是否启用。1表示启用此用户,0表示禁用此用户。 -- SYSINFO:修改用户是否可查看系统信息。1表示可以查看系统信息,0表示不可以查看系统信息。 +- PASS: Modify the user password. +- ENABLE: Specify whether the user is enabled or disabled. 1 indicates enabled and 0 indicates disabled. +- SYSINFO: Specify whether the user can query system information. 1 indicates that the user can query system information and 0 indicates that the user cannot query system information. -## 授权 +## Grant Permissions ```sql GRANT privileges ON priv_level TO user_name @@ -61,15 +62,15 @@ priv_level : { } ``` -对用户授权。 +Grant permissions to a user. -授权级别支持到DATABASE,权限有READ和WRITE两种。 +Permissions are granted on the database level. You can grant read or write permissions. -TDengine 有超级用户和普通用户两类用户。超级用户缺省创建为root,拥有所有权限。使用超级用户创建出来的用户为普通用户。在未授权的情况下,普通用户可以创建DATABASE,并拥有自己创建的DATABASE的所有权限,包括删除数据库、修改数据库、查询时序数据和写入时序数据。超级用户可以给普通用户授予其他DATABASE的读写权限,使其可以在此DATABASE上读写数据,但不能对其进行删除和修改数据库的操作。 +TDengine has superusers and standard users. The default superuser name is root. This account has all permissions. You can use the superuser account to create standard users. With no permissions, standard users can create databases and have permissions on the databases that they create. These include deleting, modifying, querying, and writing to their own databases. Superusers can grant users permission to read and write other databases. However, standard users cannot delete or modify databases created by other users. -对于非DATABASE的对象,如USER、DNODE、UDF、QNODE等,普通用户只有读权限(一般为SHOW命令),不能创建和修改。 +For non-database objects such as users, dnodes, and user-defined functions, standard users have read permissions only, generally by means of the SHOW statement. Standard users cannot create or modify these objects. -## 撤销授权 +## Revoke Permissions ```sql REVOKE privileges ON priv_level FROM user_name @@ -91,4 +92,4 @@ priv_level : { ``` -收回对用户的授权。 \ No newline at end of file +Revoke permissions from a user. diff --git a/docs/en/12-taos-sql/26-udf.md b/docs/en/12-taos-sql/26-udf.md index bd8d61a5844241efae9eee99a73c65afd3d0926f..e6199e8b315c2311be509a3eb819f33ac9a8b8bc 100644 --- a/docs/en/12-taos-sql/26-udf.md +++ b/docs/en/12-taos-sql/26-udf.md @@ -1,28 +1,68 @@ --- -sidebar_label: 自定义函数 -title: 用户自定义函数 +sidebar_label: User-Defined Functions +title: User-Defined Functions (UDF) --- -除了 TDengine 的内置函数以外,用户还可以编写自己的函数逻辑并加入TDengine系统中。 +You can create user-defined functions and import them into TDengine. +## Create UDF -## 创建函数 +SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDF are stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted. +When creating UDF, the type of UDF, i.e. a scalar function or aggregate function must be specified. If the specified type is wrong, the SQL statements using the function would fail with errors. The input data type and output data type must be consistent with the UDF definition. + +- Create Scalar Function ```sql -CREATE [AGGREGATE] FUNCTION func_name AS library_path OUTPUTTYPE type_name [BUFSIZE value] +CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type; ``` -语法说明: + - function_name: The scalar function name to be used in SQL statement which must be consistent with the UDF name and is also the name of the compiled DLL (.so file). + - library_path: The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes. + - output_type: The data type of the results of the UDF. + + For example, the following SQL statement can be used to create a UDF from `libbitand.so`. + + ```sql + CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT; + ``` + +- Create Aggregate Function +```sql +CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ]; +``` + + - function_name: The aggregate function name to be used in SQL statement which must be consistent with the udfNormalFunc name and is also the name of the compiled DLL (.so file). + - library_path: The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes. + - output_type: The output data type, the value is the literal string of the supported TDengine data type. + - buffer_size: The size of the intermediate buffer in bytes. This parameter is optional. + + For example, the following SQL statement can be used to create a UDF from `libl2norm.so`. + + ```sql + CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8; + ``` +For more information about user-defined functions, see [User-Defined Functions](../../develop/udf). -AGGREGATE:标识此函数是标量函数还是聚集函数。 -func_name:函数名,必须与函数实现中udfNormalFunc的实际名称一致。 -library_path:包含UDF函数实现的动态链接库的绝对路径,是在客户端侧主机上的绝对路径。 -OUTPUTTYPE:标识此函数的返回类型。 -BUFSIZE:中间结果的缓冲区大小,单位是字节。不设置则默认为0。最大不可超过512字节。 +## Manage UDF -关于如何开发自定义函数,请参考 [UDF使用说明](../../develop/udf)。 +- The following statement deleted the specified user-defined function. +``` +DROP FUNCTION function_name; +``` -## 删除自定义函数 +- function_name: The value of function_name in the CREATE statement used to import the UDF for example `bit_and` or `l2norm`. +```sql +DROP FUNCTION bit_and; +``` +- Show Available UDF +```sql +SHOW FUNCTIONS; +``` + +## Call UDF +The function name specified when creating UDF can be used directly in SQL statements, just like builtin functions. For example: ```sql -DROP FUNCTION func_name -``` \ No newline at end of file +SELECT X(c1,c2) FROM table/stable; +``` + +The above SQL statement invokes function X for column c1 and c2. You can use query keywords like WHERE with user-defined functions. diff --git a/docs/en/12-taos-sql/27-index.md b/docs/en/12-taos-sql/27-index.md index 2c0907723e76f304566e6a19bdef2d63225f903f..7d09bc43ab06932b82019923d4a8fda48cd99c97 100644 --- a/docs/en/12-taos-sql/27-index.md +++ b/docs/en/12-taos-sql/27-index.md @@ -1,11 +1,11 @@ --- -sidebar_label: 索引 -title: 使用索引 +sidebar_label: Index +title: Using Indices --- -TDengine 从 3.0.0.0 版本开始引入了索引功能,支持 SMA 索引和 FULLTEXT 索引。 +TDengine supports SMA and FULLTEXT indexing. -## 创建索引 +## Create an Index ```sql CREATE FULLTEXT INDEX index_name ON tb_name (col_name [, col_name] ...) @@ -19,29 +19,29 @@ functions: function [, function] ... ``` -### SMA 索引 +### SMA Indexing -对指定列按 INTERVAL 子句定义的时间窗口创建进行预聚合计算,预聚合计算类型由 functions_string 指定。SMA 索引能提升指定时间段的聚合查询的性能。目前,限制一个超级表只能创建一个 SMA INDEX。 +Performs pre-aggregation on the specified column over the time window defined by the INTERVAL clause. The type is specified in functions_string. SMA indexing improves aggregate query performance for the specified time period. One supertable can only contain one SMA index. -- 支持的函数包括 MAX、MIN 和 SUM。 -- WATERMARK: 最小单位毫秒,取值范围 [0ms, 900000ms],默认值为 5 秒,只可用于超级表。 -- MAX_DELAY: 最小单位毫秒,取值范围 [1ms, 900000ms],默认值为 interval 的值(但不能超过最大值),只可用于超级表。注:不建议 MAX_DELAY 设置太小,否则会过于频繁的推送结果,影响存储和查询性能,如无特殊需求,取默认值即可。 +- The max, min, and sum functions are supported. +- WATERMARK: Enter a value between 0ms and 900000ms. The most precise unit supported is milliseconds. The default value is 5 seconds. This option can be used only on supertables. +- MAX_DELAY: Enter a value between 1ms and 900000ms. The most precise unit supported is milliseconds. The default value is the value of interval provided that it does not exceed 900000ms. This option can be used only on supertables. Note: Retain the default value if possible. Configuring a small MAX_DELAY may cause results to be frequently pushed, affecting storage and query performance. -### FULLTEXT 索引 +### FULLTEXT Indexing -对指定列建立文本索引,可以提升含有文本过滤的查询的性能。FULLTEXT 索引不支持 index_option 语法。现阶段只支持对 JSON 类型的标签列创建 FULLTEXT 索引。不支持多列联合索引,但可以为每个列分布创建 FULLTEXT 索引。 +Creates a text index for the specified column. FULLTEXT indexing improves performance for queries with text filtering. The index_option syntax is not supported for FULLTEXT indexing. FULLTEXT indexing is supported for JSON tag columns only. Multiple columns cannot be indexed together. However, separate indices can be created for each column. -## 删除索引 +## Delete an Index ```sql DROP INDEX index_name; ``` -## 查看索引 +## View Indices ````sql ```sql SHOW INDEXES FROM tbl_name [FROM db_name]; ```` -显示在所指定的数据库或表上已创建的索引。 +Shows indices that have been created for the specified database or table. diff --git a/docs/en/12-taos-sql/28-recovery.md b/docs/en/12-taos-sql/28-recovery.md index 72b220b8ff44917831ac16301237702c991b9b15..14ac14f8673fba05fee09317de927df00effed0f 100644 --- a/docs/en/12-taos-sql/28-recovery.md +++ b/docs/en/12-taos-sql/28-recovery.md @@ -1,38 +1,38 @@ --- -sidebar_label: 异常恢复 -title: 异常恢复 +sidebar_label: Error Recovery +title: Error Recovery --- -在一个复杂的应用场景中,连接和查询任务等有可能进入一种错误状态或者耗时过长迟迟无法结束,此时需要有能够终止这些连接或任务的方法。 +In a complex environment, connections and query tasks may encounter errors or fail to return in a reasonable time. If this occurs, you can terminate the connection or task. -## 终止连接 +## Terminate a Connection ```sql KILL CONNECTION conn_id; ``` -conn_id 可以通过 `SHOW CONNECTIONS` 获取。 +You can use the SHOW CONNECTIONS statement to find the conn_id. -## 终止查询 +## Terminate a Query ```sql SHOW QUERY query_id; ``` -query_id 可以通过 `SHOW QUERIES` 获取。 +You can use the SHOW QUERIES statement to find the query_id. -## 终止事务 +## Terminate a Transaction ```sql KILL TRANSACTION trans_id ``` -trans_id 可以通过 `SHOW TRANSACTIONS` 获取。 +You can use the SHOW TRANSACTIONS statement to find the trans_id. -## 重置客户端缓存 +## Reset Client Cache ```sql RESET QUERY CACHE; ``` -如果在多客户端情况下出现元数据不同步的情况,可以用这条命令强制清空客户端缓存,随后客户端会从服务端拉取最新的元数据。 +If metadata becomes desynchronized among multiple clients, you can use this command to clear the client-side cache. Clients then obtain the latest metadata from the server. diff --git a/docs/en/12-taos-sql/29-changes.md b/docs/en/12-taos-sql/29-changes.md new file mode 100644 index 0000000000000000000000000000000000000000..8532eeac5d599ca2739393c9e38eec52631e407a --- /dev/null +++ b/docs/en/12-taos-sql/29-changes.md @@ -0,0 +1,95 @@ +--- +sidebar_label: Changes in TDengine 3.0 +title: Changes in TDengine 3.0 +description: "This document explains how TDengine SQL has changed in version 3.0." +--- + +## Basic SQL Elements + +| # | **Element** | **
Change
** | **Description** | +| - | :------- | :-------- | :------- | +| 1 | VARCHAR | Added | Alias of BINARY. +| 2 | TIMESTAMP literal | Added | TIMESTAMP 'timestamp format' syntax now supported. +| 3 | _ROWTS pseudocolumn | Added | Indicates the primary key. Alias of _C0. +| 4 | INFORMATION_SCHEMA | Added | Database for system metadata containing all schema definitions +| 5 | PERFORMANCE_SCHEMA | Added | Database for system performance information. +| 6 | Connection queries | Deprecated | Connection queries are no longer supported. The syntax and interfaces are deprecated. +| 7 | Mixed operations | Enhanced | Mixing scalar and vector operations in queries has been enhanced and is supported in all SELECT clauses. +| 8 | Tag operations | Added | Tag columns can be used in queries and clauses like data columns. +| 9 | Timeline clauses and time functions in supertables | Enhanced | When PARTITION BY is not used, data in supertables is merged into a single timeline. + +## SQL Syntax + +The following data types can be used in the schema for standard tables. + +| # | **Statement** | **
Change
** | **Description** | +| - | :------- | :-------- | :------- | +| 1 | ALTER ACCOUNT | Deprecated| This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported." +| 2 | ALTER ALL DNODES | Added | Modifies the configuration of all dnodes. +| 3 | ALTER DATABASE | Modified | Deprecated
  • QUORUM: Specified the required number of confirmations. STRICT is now used to specify strong or weak consistency. The STRICT parameter cannot be modified.
  • BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode.
  • UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns.
  • CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST.
  • COMP: Cannot be modified.
    Added
  • CACHEMODEL: Specifies whether to cache the latest subtable data.
  • CACHESIZE: Specifies the size of the cache for the newest subtable data.
  • WAL_FSYNC_PERIOD: Replaces the FSYNC parameter.
  • WAL_LEVEL: Replaces the WAL parameter.
    Modified
  • REPLICA: Cannot be modified.
  • KEEP: Now supports units.
+| 4 | ALTER STABLE | Modified | Deprecated
  • CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG.
    Added
  • RENAME TAG: Replaces CHANGE TAG.
  • COMMENT: Specifies comments for a supertable.
+| 5 | ALTER TABLE | Modified | Deprecated
  • CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG.
    Added
  • RENAME TAG: Replaces CHANGE TAG.
  • COMMENT: Specifies comments for a standard table.
  • TTL: Specifies the time-to-live for a standard table.
+| 6 | ALTER USER | Modified | Deprecated
  • PRIVILEGE: Specified user permissions. Replaced by GRANT and REVOKE.
    Added
  • ENABLE: Enables or disables a user.
  • SYSINFO: Specifies whether a user can query system information.
+| 7 | COMPACT VNODES | Not supported | Compacted the data on a vnode. Not supported. +| 8 | CREATE ACCOUNT | Deprecated| This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported." +| 9 | CREATE DATABASE | Modified | Deprecated
  • BLOCKS: Specified the number of blocks for each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode.
  • CACHE: Specified the size of the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode.
  • CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST.
  • DAYS: The length of time to store in a single file. Replaced by DURATION.
  • FSYNC: Specified the fsync interval when WAL was set to 2. Replaced by WAL_FSYNC_PERIOD.
  • QUORUM: Specified the number of confirmations required. STRICT is now used to specify strong or weak consistency.
  • UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns.
  • WAL: Specified the WAL level. Replaced by WAL_LEVEL.
    Added
  • BUFFER: Specifies the size of the write cache pool for each vnode.
  • CACHEMODEL: Specifies whether to cache the latest subtable data.
  • CACHESIZE: Specifies the size of the cache for the newest subtable data.
  • DURATION: Replaces DAYS. Now supports units.
  • PAGES: Specifies the number of pages in the metadata storage engine cache on each vnode.
  • PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode.
  • RETENTIONS: Specifies the aggregation interval and retention period
  • STRICT: Specifies whether strong data consistency is enabled.
  • SINGLE_STABLE: Specifies whether a database can contain multiple supertables.
  • VGROUPS: Specifies the initial number of vgroups when a database is created.
  • WAL_FSYNC_PERIOD: Replaces the FSYNC parameter.
  • WAL_LEVEL: Replaces the WAL parameter.
  • WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription.
  • WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription.
  • WAL_ROLL_PERIOD: Specifies the WAL rotation period.
  • WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file.
    Modified
  • KEEP: Now supports units.
+| 10 | CREATE DNODE | Modified | Now supports specifying hostname and port separately
  • CREATE DNODE dnode_host_name PORT port_val
+| 11 | CREATE INDEX | Added | Creates an SMA index. +| 12 | CREATE MNODE | Added | Creates an mnode. +| 13 | CREATE QNODE | Added | Creates a qnode. +| 14 | CREATE STABLE | Modified | New parameter added
  • COMMENT: Specifies comments for the supertable.
  • +| 15 | CREATE STREAM | Added | Creates a stream. +| 16 | CREATE TABLE | Modified | New parameters added
    • COMMENT: Specifies comments for the table
    • WATERMARK: Specifies the window closing time.
    • MAX_DELAY: Specifies the maximum delay for pushing stream processing results.
    • ROLLUP: Specifies aggregate functions to roll up. Rolling up a function provides downsampled results based on multiple axes.
    • SMA: Provides user-defined precomputation of aggregates based on data blocks.
    • TTL: Specifies the time-to-live for a standard table.
    +| 17 | CREATE TOPIC | Added | Creates a topic. +| 18 | DROP ACCOUNT | Deprecated| This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported." +| 19 | DROP CONSUMER GROUP | Added | Deletes a consumer group. +| 20 | DROP INDEX | Added | Deletes an index. +| 21 | DROP MNODE | Added | Creates an mnode. +| 22 | DROP QNODE | Added | Creates a qnode. +| 23 | DROP STREAM | Added | Deletes a stream. +| 24 | DROP TABLE | Modified | Added batch deletion syntax. +| 25 | DROP TOPIC | Added | Deletes a topic. +| 26 | EXPLAIN | Added | Query the execution plan of a query statement. +| 27 | GRANT | Added | Grants permissions to a user. +| 28 | KILL TRANSACTION | Added | Terminates an mnode transaction. +| 29 | KILL STREAM | Deprecated | Terminated a continuous query. The continuous query feature has been replaced with the stream processing feature. +| 30 | MERGE VGROUP | Added | Merges vgroups. +| 31 | REVOKE | Added | Revokes permissions from a user. +| 32 | SELECT | Modified |
    • SELECT does not use the implicit results column. Output columns must be specified in the SELECT clause.
    • DISTINCT support is enhanced. In previous versions, DISTINCT only worked on the tag column and could not be used with JOIN or GROUP BY.
    • JOIN support is enhanced. The following are now supported after JOIN: a WHERE clause with OR, operations on multiple tables, and GROUP BY on multiple tables.
    • Subqueries after FROM are enhanced. Levels of nesting are no longer restricted. Subqueries can be used with UNION ALL. Other syntax restrictions are eliminated.
    • All scalar functions can be used after WHERE.
    • GROUP BY is enhanced. You can group by any scalar expression or combination thereof.
    • SESSION can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline.
    • STATE_WINDOW can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline.
    • ORDER BY is enhanced. It is no longer required to use ORDER BY and GROUP BY together. There is no longer a restriction on the number of order expressions. NULLS FIRST and NULLS LAST syntax has been added. Any expression that conforms to the ORDER BY semantics can be used.
    • Added PARTITION BY syntax. PARTITION BY replaces GROUP BY tags.
    +| 33 | SHOW ACCOUNTS | Deprecated | This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported." +| 34 | SHOW APPS | Added | Shows all clients (such as applications) that connect to the cluster. +| 35 | SHOW CONSUMERS | Added | Shows information about all active consumers in the system. +| 36 | SHOW DATABASES | Modified | Only shows database names. +| 37 | SHOW FUNCTIONS | Modified | Only shows UDF names. +| 38 | SHOW LICENCE | Added | Alias of SHOW GRANTS. +| 39 | SHOW INDEXES | Added | Shows indices that have been created. +| 40 | SHOW LOCAL VARIABLES | Added | Shows the working configuration of the client. +| 41 | SHOW MODULES | Deprecated | Shows information about modules installed in the system. +| 42 | SHOW QNODES | Added | Shows information about qnodes in the system. +| 43 | SHOW STABLES | Modified | Only shows supertable names. +| 44 | SHOW STREAMS | Modified | This statement previously showed continuous queries. The continuous query feature has been replaced with the stream processing feature. This statement now shows streams that have been created. +| 45 | SHOW SUBSCRIPTIONS | Added | Shows all subscriptions in the current database. +| 46 | SHOW TABLES | Modified | Only shows table names. +| 47 | SHOW TABLE DISTRIBUTED | Added | Shows how table data is distributed. This replaces the `SELECT _block_dist() FROM { tb_name | stb_name }` command. +| 48 | SHOW TOPICS | Added | Shows all subscribed topics in the current database. +| 49 | SHOW TRANSACTIONS | Added | Shows all running transactions in the system. +| 50 | SHOW DNODE VARIABLES | Added | Shows the configuration of the specified dnode. +| 51 | SHOW VNODES | Not supported | Shows information about vnodes in the system. Not supported. +| 52 | SPLIT VGROUP | Added | Splits a vgroup into two vgroups. +| 53 | TRIM DATABASE | Added | Deletes data that has expired and orders the remaining data in accordance with the storage configuration. + +## SQL Functions + +| # | **Function** | **
    Change
    ** | **Description** | +| - | :------- | :-------- | :------- | +| 1 | TWA | Added | Can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. +| 2 | IRATE | Enhanced | Can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. +| 3 | LEASTSQUARES | Enhanced | Can be used on supertables. +| 4 | ELAPSED | Enhanced | Can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. +| 5 | DIFF | Enhanced | Can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. +| 6 | DERIVATIVE | Enhanced | Can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. +| 7 | CSUM | Enhanced | Can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. +| 8 | MAVG | Enhanced | Can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. +| 9 | SAMPLE | Enhanced | Can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. +| 10 | STATECOUNT | Enhanced | Can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. +| 11 | STATEDURATION | Enhanced | Can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. diff --git a/docs/en/12-taos-sql/index.md b/docs/en/12-taos-sql/index.md index 33656338a7bba38dc55cf536bdba8e95309c5acf..a5ffc9dc8dce158eccc0fa0519f09ba346710c31 100644 --- a/docs/en/12-taos-sql/index.md +++ b/docs/en/12-taos-sql/index.md @@ -1,22 +1,23 @@ --- title: TDengine SQL -description: "The syntax supported by TDengine SQL " +description: 'The syntax supported by TDengine SQL ' --- -This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. +This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. TDengine SQL has been enhanced in version 3.0, and the query engine has been rearchitected. For information about how TDengine SQL has changed, see [Changes in TDengine 3.0](../taos-sql/changes). -TDengine SQL is the major interface for users to write data into or query from TDengine. For ease of use, the syntax is similar to that of standard SQL. However, please note that TDengine SQL is not standard SQL. For instance, TDengine doesn't provide a delete function for time series data and so corresponding statements are not provided in TDengine SQL. +TDengine SQL is the major interface for users to write data into or query from TDengine. It uses standard SQL syntax and includes extensions and optimizations for time-series data and services. The maximum length of a TDengine SQL statement is 1 MB. Note that keyword abbreviations are not supported. For example, DELETE cannot be entered as DEL. Syntax Specifications used in this chapter: -- The content inside <\> needs to be input by the user, excluding <\> itself. +- Keywords are given in uppercase, although SQL is not case-sensitive. +- Information that you input is given in lowercase. - \[ \] means optional input, excluding [] itself. - | means one of a few options, excluding | itself. - … means the item prior to it can be repeated multiple times. -To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below: +To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below: -```sql +``` taos> DESCRIBE meters; Field | Type | Length | Note | ================================================================================= @@ -29,3 +30,10 @@ taos> DESCRIBE meters; ``` The data set includes the data collected by 4 meters, the corresponding table name is d1001, d1002, d1003 and d1004 based on the data model of TDengine. + +```mdx-code-block +import DocCardList from '@theme/DocCardList'; +import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; + + +``` diff --git a/docs/en/13-operation/01-pkg-install.md b/docs/en/13-operation/01-pkg-install.md index c098002962d62aa0acc7a94462c052303cb2ed90..d7713b943f5fe8fbd5e685b8ba03ff8cc8ed4e53 100644 --- a/docs/en/13-operation/01-pkg-install.md +++ b/docs/en/13-operation/01-pkg-install.md @@ -1,156 +1,77 @@ --- -title: Install & Uninstall +title: Install and Uninstall description: Install, Uninstall, Start, Stop and Upgrade --- import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -TDengine community version provides deb and rpm packages for users to choose from, based on their system environment. The deb package supports Debian, Ubuntu and derivative systems. The rpm package supports CentOS, RHEL, SUSE and derivative systems. Furthermore, a tar.gz package is provided for TDengine Enterprise customers. +This document gives more information about installing, uninstalling, and upgrading TDengine. ## Install +About details of installing TDenine, please refer to [Installation Guide](../../get-started/package/). + +## Uninstall + - + -1. Download deb package from official website, for example TDengine-server-2.4.0.7-Linux-x64.deb -2. In the directory where the package is located, execute the command below +Apt-get package of TDengine can be uninstalled as below: ```bash -$ sudo dpkg -i TDengine-server-2.4.0.7-Linux-x64.deb -(Reading database ... 137504 files and directories currently installed.) -Preparing to unpack TDengine-server-2.4.0.7-Linux-x64.deb ... +$ sudo apt-get remove tdengine +Reading package lists... Done +Building dependency tree +Reading state information... Done +The following packages will be REMOVED: + tdengine +0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded. +After this operation, 68.3 MB disk space will be freed. +Do you want to continue? [Y/n] y +(Reading database ... 135625 files and directories currently installed.) +Removing tdengine (3.0.0.0) ... TDengine is removed successfully! -Unpacking tdengine (2.4.0.7) over (2.4.0.7) ... -Setting up tdengine (2.4.0.7) ... -Start to install TDengine... - -System hostname is: ubuntu-1804 - -Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join -OR leave it blank to build one: -Enter your email address for priority support or enter empty to skip: -Created symlink /etc/systemd/system/multi-user.target.wants/taosd.service → /etc/systemd/system/taosd.service. - -To configure TDengine : edit /etc/taos/taos.cfg -To start TDengine : sudo systemctl start taosd -To access TDengine : taos -h ubuntu-1804 to login into TDengine server - - -TDengine is installed successfully! ``` - - - - -1. Download rpm package from official website, for example TDengine-server-2.4.0.7-Linux-x64.rpm; -2. In the directory where the package is located, execute the command below +Apt-get package of taosTools can be uninstalled as below: ``` -$ sudo rpm -ivh TDengine-server-2.4.0.7-Linux-x64.rpm -Preparing... ################################# [100%] -Updating / installing... - 1:tdengine-2.4.0.7-3 ################################# [100%] -Start to install TDengine... - -System hostname is: centos7 - -Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join -OR leave it blank to build one: - -Enter your email address for priority support or enter empty to skip: - -Created symlink from /etc/systemd/system/multi-user.target.wants/taosd.service to /etc/systemd/system/taosd.service. - -To configure TDengine : edit /etc/taos/taos.cfg -To start TDengine : sudo systemctl start taosd -To access TDengine : taos -h centos7 to login into TDengine server - - -TDengine is installed successfully! +$ sudo apt remove taostools +Reading package lists... Done +Building dependency tree +Reading state information... Done +The following packages will be REMOVED: + taostools +0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded. +After this operation, 68.3 MB disk space will be freed. +Do you want to continue? [Y/n] +(Reading database ... 147973 files and directories currently installed.) +Removing taostools (2.1.2) ... ``` - - - -1. Download the tar.gz package, for example TDengine-server-2.4.0.7-Linux-x64.tar.gz; -2. In the directory where the package is located, first decompress the file, then switch to the sub-directory generated in decompressing, i.e. "TDengine-enterprise-server-2.4.0.7/" in this example, and execute the `install.sh` script. - -```bash -$ tar xvzf TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz -TDengine-enterprise-server-2.4.0.7/ -TDengine-enterprise-server-2.4.0.7/driver/ -TDengine-enterprise-server-2.4.0.7/driver/vercomp.txt -TDengine-enterprise-server-2.4.0.7/driver/libtaos.so.2.4.0.7 -TDengine-enterprise-server-2.4.0.7/install.sh -TDengine-enterprise-server-2.4.0.7/examples/ -... - -$ ll -total 43816 -drwxrwxr-x 3 ubuntu ubuntu 4096 Feb 22 09:31 ./ -drwxr-xr-x 20 ubuntu ubuntu 4096 Feb 22 09:30 ../ -drwxrwxr-x 4 ubuntu ubuntu 4096 Feb 22 09:30 TDengine-enterprise-server-2.4.0.7/ --rw-rw-r-- 1 ubuntu ubuntu 44852544 Feb 22 09:31 TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz - -$ cd TDengine-enterprise-server-2.4.0.7/ - - $ ll -total 40784 -drwxrwxr-x 4 ubuntu ubuntu 4096 Feb 22 09:30 ./ -drwxrwxr-x 3 ubuntu ubuntu 4096 Feb 22 09:31 ../ -drwxrwxr-x 2 ubuntu ubuntu 4096 Feb 22 09:30 driver/ -drwxrwxr-x 10 ubuntu ubuntu 4096 Feb 22 09:30 examples/ --rwxrwxr-x 1 ubuntu ubuntu 33294 Feb 22 09:30 install.sh* --rw-rw-r-- 1 ubuntu ubuntu 41704288 Feb 22 09:30 taos.tar.gz - -$ sudo ./install.sh - -Start to update TDengine... -Created symlink /etc/systemd/system/multi-user.target.wants/taosd.service → /etc/systemd/system/taosd.service. -Nginx for TDengine is updated successfully! - -To configure TDengine : edit /etc/taos/taos.cfg -To configure Taos Adapter (if has) : edit /etc/taos/taosadapter.toml -To start TDengine : sudo systemctl start taosd -To access TDengine : use taos -h ubuntu-1804 in shell OR from http://127.0.0.1:6060 - -TDengine is updated successfully! -Install taoskeeper as a standalone service -taoskeeper is installed, enable it by `systemctl enable taoskeeper` -``` - -:::info -Users will be prompted to enter some configuration information when install.sh is executing. The interactive mode can be disabled by executing `./install.sh -e no`. `./install.sh -h` can show all parameters with detailed explanation. - -::: - - - - -:::note -When installing on the first node in the cluster, at the "Enter FQDN:" prompt, nothing needs to be provided. When installing on subsequent nodes, at the "Enter FQDN:" prompt, you must enter the end point of the first dnode in the cluster if it is already up. You can also just ignore it and configure it later after installation is finished. - -::: - -## Uninstall - - Deb package of TDengine can be uninstalled as below: -```bash +``` $ sudo dpkg -r tdengine (Reading database ... 137504 files and directories currently installed.) -Removing tdengine (2.4.0.7) ... +Removing tdengine (3.0.0.0) ... TDengine is removed successfully! ``` +Deb package of taosTools can be uninstalled as below: + +``` +$ sudo dpkg -r taostools +(Reading database ... 147973 files and directories currently installed.) +Removing taostools (2.1.2) ... +``` + @@ -162,6 +83,13 @@ $ sudo rpm -e tdengine TDengine is removed successfully! ``` +RPM package of taosTools can be uninstalled as below: + +``` +sudo rpm -e taostools +taosToole is removed successfully! +``` + @@ -170,115 +98,69 @@ tar.gz package of TDengine can be uninstalled as below: ``` $ rmtaos -Nginx for TDengine is running, stopping it... TDengine is removed successfully! - -taosKeeper is removed successfully! -``` - - - - -:::note - -- We strongly recommend not to use multiple kinds of installation packages on a single host TDengine. -- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed. - -```bash - $ sudo rm -f /var/lib/dpkg/info/tdengine* ``` -- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed. +tar.gz package of taosTools can be uninstalled as below: -```bash - $ sudo rpm -e --noscripts tdengine ``` +$ rmtaostools +Start to uninstall taos tools ... -::: - -## Installation Directory - -TDengine is installed at /usr/local/taos if successful. - -```bash -$ cd /usr/local/taos -$ ll -$ ll -total 28 -drwxr-xr-x 7 root root 4096 Feb 22 09:34 ./ -drwxr-xr-x 12 root root 4096 Feb 22 09:34 ../ -drwxr-xr-x 2 root root 4096 Feb 22 09:34 bin/ -drwxr-xr-x 2 root root 4096 Feb 22 09:34 cfg/ -lrwxrwxrwx 1 root root 13 Feb 22 09:34 data -> /var/lib/taos/ -drwxr-xr-x 2 root root 4096 Feb 22 09:34 driver/ -drwxr-xr-x 10 root root 4096 Feb 22 09:34 examples/ -drwxr-xr-x 2 root root 4096 Feb 22 09:34 include/ -lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/ +taos tools is uninstalled successfully! ``` -During the installation process: - -- Configuration directory, data directory, and log directory are created automatically if they don't exist -- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg -- The default data directory is /var/lib/taos, which is a soft link to /usr/local/taos/data -- The default log directory is /var/log/taos, which is a soft link to /usr/local/taos/log -- The executables at /usr/local/taos/bin are linked to /usr/bin -- The DLL files at /usr/local/taos/driver are linked to /usr/lib -- The header files at /usr/local/taos/include are linked to /usr/include - -:::note + + +Run C:\TDengine\unins000.exe to uninstall TDengine on a Windows system. + + -- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow data integrity, security, backup or relevant SOPs before deleting any data. -- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used. +:::info -## Start and Stop +- We strongly recommend not to use multiple kinds of installation packages on a single host TDengine. The packages may affect each other and cause errors. -Linux system services `systemd`, `systemctl` or `service` are used to start, stop and restart TDengine. The server process of TDengine is `taosd`, which is started automatically after the Linux system is started. System operators can use `systemd`, `systemctl` or `service` to start, stop or restart TDengine server. +- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. -For example, if using `systemctl` , the commands to start, stop, restart and check TDengine server are below: + ``` + $ sudo rm -f /var/lib/dpkg/info/tdengine* + ``` -- Start server:`systemctl start taosd` +You can then reinstall if needed. -- Stop server:`systemctl stop taosd` +- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. -- Restart server:`systemctl restart taosd` + ``` + $ sudo rpm -e --noscripts tdengine + ``` -- Check server status:`systemctl status taosd` +You can then reinstall if needed. -From version 2.4.0.0, a new independent component named as `taosAdapter` has been included in TDengine. `taosAdapter` should be started and stopped using `systemctl`. +::: -If the server process is OK, the output of `systemctl status` is like below: +Uninstalling and Modifying Files -``` -Active: active (running) -``` +- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow data integrity, security, backup or relevant SOPs before deleting any data. -Otherwise, the output is as below: +- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used. -``` -Active: inactive (dead) -``` ## Upgrade - There are two aspects in upgrade operation: upgrade installation package and upgrade a running server. To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version. Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 3 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below: - - Stop inserting data - Make sure all data is persisted to disk -- Make some simple queries (Such as total rows in stables, tables and so on. Note down the values. Follow best practices and relevant SOPs.) - Stop the cluster of TDengine - Uninstall old version and install new version - Start the cluster of TDengine -- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss +- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss - Run some simple data insertion statements to make sure the cluster works well - Restore business services :::warning - TDengine doesn't guarantee any lower version is compatible with the data generated by a higher version, so it's never recommended to downgrade the version. ::: diff --git a/docs/en/13-operation/02-planning.mdx b/docs/en/13-operation/02-planning.mdx index c1baf92dbfa8d93f83174c05c2ea631d1a469739..2dffa7bb8747e21e4754740208eafed65d341217 100644 --- a/docs/en/13-operation/02-planning.mdx +++ b/docs/en/13-operation/02-planning.mdx @@ -1,40 +1,32 @@ --- +sidebar_label: Resource Planning title: Resource Planning --- It is important to plan computing and storage resources if using TDengine to build an IoT, time-series or Big Data platform. How to plan the CPU, memory and disk resources required, will be described in this chapter. -## Memory Requirement of Server Side +## Server Memory Requirements -By default, the number of vgroups created for each database is the same as the number of CPU cores. This can be configured by the parameter `maxVgroupsPerDb`. Each vnode in a vgroup stores one replica. Each vnode consumes a fixed amount of memory, i.e. `blocks` \* `cache`. In addition, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster. So, the memory required for each DB can be calculated using the formula below: +Each database creates a fixed number of vgroups. This number is 2 by default and can be configured with the `vgroups` parameter. The number of replicas can be controlled with the `replica` parameter. Each replica requires one vnode per vgroup. Altogether, the memory required by each database depends on the following configuration options: -``` -Database Memory Size = maxVgroupsPerDb * replica * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB) -``` +- vgroups +- replica +- buffer +- pages +- pagesize +- cachesize -For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` is 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M. +For more information, see [Database](../../taos-sql/database). -In the real operation of TDengine, we are more concerned about the memory used by each TDengine server process `taosd`. +The memory required by a database is therefore greater than or equal to: ``` - taosd_memory = vnode_memory + mnode_memory + query_memory +vgroups * replica * (buffer + pages * pagesize + cachesize) ``` -In the above formula: - -1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs whose memory usage can be derived according to the formula for Database Memory Size, mentioned above, then dividing by number of dnodes and multiplying the number of replicas. - -``` - vnode_memory = (sum(Database Memory Size) / number_of_dnodes) * replica -``` - -2. "mnode_memory" of a `taosd` process is the memory consumed by a mnode. If there is one (and only one) mnode hosted in a `taosd` process, the memory consumed by "mnode" is "0.2KB \* the total number of tables in the cluster". - -3. "query_memory" is the memory used when processing query requests. Each ongoing query consumes at least "0.2 KB \* total number of involved tables". - -Please note that the above formulas can only be used to estimate the minimum memory requirement, instead of maximum memory usage. In a real production environment, it's better to reserve some redundance beyond the estimated minimum memory requirement. If memory is abundant, it's suggested to increase the value of parameter `blocks` to speed up data insertion and data query. +However, note that this requirement is spread over all dnodes in the cluster, not on a single physical machine. The physical servers that run dnodes meet the requirement together. If a cluster has multiple databases, the memory required increases accordingly. In complex environments where dnodes were added after initial deployment in response to increasing resource requirements, load may not be balanced among the original dnodes and newer dnodes. In this situation, the actual status of your dnodes is more important than theoretical calculations. -## Memory Requirement of Client Side +## Client Memory Requirements For the client programs using TDengine client driver `taosc` to connect to the server side there is a memory requirement as well. @@ -56,10 +48,10 @@ So, at least 3GB needs to be reserved for such a client. The CPU resources required depend on two aspects: -- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The difference in computing resource consumed, between inserting 1 row at a time, and inserting 10 rows at a time is very small. So, the more the number of rows that can be inserted one time, the higher the efficiency. Inserting in batch also imposes requirements on the client side which needs to cache rows to insert in batch once the number of cached rows reaches a threshold. +- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The difference in computing resource consumed, between inserting 1 row at a time, and inserting 10 rows at a time is very small. So, the more the number of rows that can be inserted one time, the higher the efficiency. If each insert request contains more than 200 records, a single core can process more than 1 million records per second. Inserting in batch also imposes requirements on the client side which needs to cache rows to insert in batch once the number of cached rows reaches a threshold. - **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, and other requirements provided by users. -In short, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. In real operation, it's suggested to control CPU usage below 50%. If this threshold is exceeded, it's a reminder for system operator to add more nodes in the cluster to expand resources. +In short, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. If possible, ensure that CPU usage remains below 50%. If this threshold is exceeded, it's a reminder for system operator to add more nodes in the cluster to expand resources. ## Disk Requirement @@ -77,6 +69,6 @@ To increase performance, multiple disks can be setup for parallel data reading o ## Number of Hosts -A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulae mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily. +A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulae mentioned previously. If the number of data replicas is not 1, the required resources are multiplied by the number of replicas. -**Quick Estimation for CPU, Memory and Disk** Please refer to [Resource Estimate](https://www.taosdata.com/config/config.html). +Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily. diff --git a/docs/en/13-operation/03-tolerance.md b/docs/en/13-operation/03-tolerance.md index d4d48d7fcdc2c990b6ea0821e2347c70a809ed79..21a5a902822d7b85f555114a112686d4e35c64aa 100644 --- a/docs/en/13-operation/03-tolerance.md +++ b/docs/en/13-operation/03-tolerance.md @@ -1,6 +1,5 @@ --- -sidebar_label: Fault Tolerance -title: Fault Tolerance & Disaster Recovery +title: Fault Tolerance and Disaster Recovery --- ## Fault Tolerance @@ -11,22 +10,21 @@ When a data block is received by TDengine, the original data block is first writ There are 2 configuration parameters related to WAL: -- walLevel: - - 0:wal is disabled - - 1:wal is enabled without fsync - - 2:wal is enabled with fsync -- fsync:This parameter is only valid when walLevel is set to 2. It specifies the interval, in milliseconds, of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written. +- wal_level: Specifies the WAL level. 1 indicates that WAL is enabled but fsync is disabled. 2 indicates that WAL and fsync are both enabled. The default value is 1. +- wal_fsync_period: This parameter is only valid when wal_level is set to 2. It specifies the interval, in milliseconds, of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written. -To achieve absolutely no data loss, walLevel should be set to 2 and fsync should be set to 1. There is a performance penalty to the data ingestion rate. However, if the concurrent data insertion threads on the client side can reach a big enough number, for example 50, the data ingestion performance will be still good enough. Our verification shows that the drop is only 30% when fsync is set to 3,000 milliseconds. +To achieve absolutely no data loss, set wal_level to 2 and wal_fsync_period to 0. There is a performance penalty to the data ingestion rate. However, if the concurrent data insertion threads on the client side can reach a big enough number, for example 50, the data ingestion performance will be still good enough. Our verification shows that the drop is only 30% when wal_fsync_period is set to 3000 milliseconds. ## Disaster Recovery -TDengine uses replication to provide high availability and disaster recovery capability. +TDengine uses replication to provide high availability. -A TDengine cluster is managed by mnode. To ensure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency. +A TDengine cluster is managed by mnodes. You can configure up to three mnodes to ensure high availability. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency. -The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1. +The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, the parameter `replica` is used to specify the number of replicas. To achieve high availability, set `replica` to 3. The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create a table. As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is higher than 1, high availability can be achieved without any other assistance. For disaster recovery, dnodes of a TDengine cluster should be deployed in geographically different data centers. + +Alternatively, you can use taosX to synchronize the data from one TDengine cluster to another cluster in a remote location. However, taosX is only available in TDengine enterprise version, for more information please contact tdengine.com. diff --git a/docs/en/13-operation/06-admin.md b/docs/en/13-operation/06-admin.md deleted file mode 100644 index 458a91b88c6d8319fe8b84c2b34d8ff968957910..0000000000000000000000000000000000000000 --- a/docs/en/13-operation/06-admin.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: User Management ---- - -A system operator can use TDengine CLI `taos` to create or remove users or change passwords. The SQL commands are documented below: - -## Create User - -```sql -CREATE USER PASS <'password'>; -``` - -When creating a user and specifying the user name and password, the password needs to be quoted using single quotes. - -## Drop User - -```sql -DROP USER ; -``` - -Dropping a user can only be performed by root. - -## Change Password - -```sql -ALTER USER PASS <'password'>; -``` - -To keep the case of the password when changing password, the password needs to be quoted using single quotes. - -## Change Privilege - -```sql -ALTER USER PRIVILEGE ; -``` - -The privileges that can be changed to are `read` or `write` without single quotes. - -Note:there is another privilege `super`, which is not allowed to be authorized to any user. - -## Show Users - -```sql -SHOW USERS; -``` - -:::note -In SQL syntax, `< >` means the part that needs to be input by the user, excluding the `< >` itself. - -::: diff --git a/docs/en/13-operation/09-status.md b/docs/en/13-operation/09-status.md deleted file mode 100644 index 51396524ea281ae665c9fdf61d2e6e6202995537..0000000000000000000000000000000000000000 --- a/docs/en/13-operation/09-status.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -sidebar_label: Connections & Tasks -title: Manage Connections and Query Tasks ---- - -A system operator can use the TDengine CLI to show connections, ongoing queries, stream computing, and can close connections or stop ongoing query tasks or stream computing. - -## Show Connections - -```sql -SHOW CONNECTIONS; -``` - -One column of the output of the above SQL command is "ip:port", which is the end point of the client. - -## Force Close Connections - -```sql -KILL CONNECTION ; -``` - -In the above SQL command, `connection-id` is from the first column of the output of `SHOW CONNECTIONS`. - -## Show Ongoing Queries - -```sql -SHOW QUERIES; -``` - -The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection. The format is "connection-id:query-no". - -## Force Close Queries - -```sql -KILL QUERY ; -``` - -In the above SQL command, `query-id` is from the first column of the output of `SHOW QUERIES `. - -## Show Continuous Query - -```sql -SHOW STREAMS; -``` - -The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection. The format is "connection-id:stream-no". - -## Force Close Continuous Query - -```sql -KILL STREAM ; -``` - -The above SQL command, `stream-id` is from the first column of the output of `SHOW STREAMS`. diff --git a/docs/en/13-operation/17-diagnose.md b/docs/en/13-operation/17-diagnose.md index 2b474fddba4af5ba0c29103cd8ab1249d10d055b..d01d12e831956e6a6db654e1f6dbf5072ac6b243 100644 --- a/docs/en/13-operation/17-diagnose.md +++ b/docs/en/13-operation/17-diagnose.md @@ -13,110 +13,59 @@ Diagnostic steps: 1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd. 2. On the server side, execute command `taos -n server -P -l ` to monitor the port range starting from the port specified by `-P` parameter with the role of "server". 3. On the client side, execute command `taos -n client -h -P -l ` to send a testing package to the specified server and port. - --l : The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000. Please note that the package length must be same in the above 2 commands executed on server side and client side respectively. + +-l : The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000. +Please note that the package length must be same in the above 2 commands executed on server side and client side respectively. Output of the server side for the example is below: ```bash -# taos -n server -P 6000 -12/21 14:50:13.522509 0x7f536f455200 UTL work as server, host:172.27.0.7 startPort:6000 endPort:6011 pkgLen:1000 - -12/21 14:50:13.522659 0x7f5352242700 UTL TCP server at port:6000 is listening -12/21 14:50:13.522727 0x7f5351240700 UTL TCP server at port:6001 is listening -... -... +# taos -n server -P 6030 -l 1000 +network test server is initialized, port:6030 +request is received, size:1000 +request is received, size:1000 ... -12/21 14:50:13.523954 0x7f5342fed700 UTL TCP server at port:6011 is listening -12/21 14:50:13.523989 0x7f53437ee700 UTL UDP server at port:6010 is listening -12/21 14:50:13.524019 0x7f53427ec700 UTL UDP server at port:6011 is listening -12/21 14:50:22.192849 0x7f5352242700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6000 -12/21 14:50:22.192993 0x7f5352242700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6000 -12/21 14:50:22.237082 0x7f5351a41700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6000 -12/21 14:50:22.237203 0x7f5351a41700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6000 -12/21 14:50:22.237450 0x7f5351240700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6001 -12/21 14:50:22.237576 0x7f5351240700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6001 -12/21 14:50:22.281038 0x7f5350a3f700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6001 -12/21 14:50:22.281141 0x7f5350a3f700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6001 ... ... -... -12/21 14:50:22.677443 0x7f5342fed700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6011 -12/21 14:50:22.677576 0x7f5342fed700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6011 -12/21 14:50:22.721144 0x7f53427ec700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6011 -12/21 14:50:22.721261 0x7f53427ec700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6011 +request is received, size:1000 +request is received, size:1000 ``` Output of the client side for the example is below: ```bash # taos -n client -h 172.27.0.7 -P 6000 -12/21 14:50:22.192434 0x7fc95d859200 UTL work as client, host:172.27.0.7 startPort:6000 endPort:6011 pkgLen:1000 - -12/21 14:50:22.192472 0x7fc95d859200 UTL server ip:172.27.0.7 is resolved from host:172.27.0.7 -12/21 14:50:22.236869 0x7fc95d859200 UTL successed to test TCP port:6000 -12/21 14:50:22.237215 0x7fc95d859200 UTL successed to test UDP port:6000 +taos -n client -h v3s2 -P 6030 -l 1000 +network test client is initialized, the server is v3s2:6030 +request is sent, size:1000 +response is received, size:1000 +request is sent, size:1000 +response is received, size:1000 ... ... ... -12/21 14:50:22.676891 0x7fc95d859200 UTL successed to test TCP port:6010 -12/21 14:50:22.677240 0x7fc95d859200 UTL successed to test UDP port:6010 -12/21 14:50:22.720893 0x7fc95d859200 UTL successed to test TCP port:6011 -12/21 14:50:22.721274 0x7fc95d859200 UTL successed to test UDP port:6011 -``` - -The output needs to be checked carefully for the system operator to find the root cause and resolve the problem. - -## Startup Status and RPC Diagnostic - -`taos -n startup -h ` can be used to check the startup status of a `taosd` process. This is a common task which should be performed by a system operator, especially in the case of a cluster, to determine whether `taosd` has been started successfully. - -`taos -n rpc -h ` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or is working abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's a network problem or whether `taosd` is abnormal. - -## Sync and Arbitrator Diagnostic +request is sent, size:1000 +response is received, size:1000 +request is sent, size:1000 +response is received, size:1000 -```bash -taos -n sync -P 6040 -h -taos -n sync -P 6042 -h +total succ: 100/100 cost: 16.23 ms speed: 5.87 MB/s ``` -The above commands can be executed in a Linux shell to check whether the port for sync is working well and whether the sync module on the server side is working well. Additionally, `-P 6042` is used to check whether the arbitrator is configured properly and is working well. - -## Network Speed Diagnostic - -`taos -n speed -h -P 6030 -N 10 -l 10000000 -S TCP` - -From version 2.2.0.0 onwards, the above command can be executed in a Linux shell to test network speed. The command sends uncompressed packages to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. Parameters can be used when testing network speed are as below: - --n:When set to "speed", it means testing network speed. --h:The FQDN or IP of the server process to be connected to; if not set, the FQDN configured in `taos.cfg` is used. --P:The port of the server process to connect to, the default value is 6030. --N:The number of packages that will be sent in the test, range is [1,10000], default value is 100. --l:The size of each package in bytes, range is [1024, 1024 \* 1024 \* 1024], default value is 1024. --S:The type of network packages to send, can be either TCP or UDP, default value is TCP. - -## FQDN Resolution Diagnostic - -`taos -n fqdn -h ` - -From version 2.2.0.0 onward, the above command can be executed in a Linux shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below: - --n:When set to "fqdn", it means testing the speed of resolving FQDN. --h:The FQDN to be resolved. If not set, the `FQDN` parameter in `taos.cfg` is used by default. +The output needs to be checked carefully for the system operator to find the root cause and resolve the problem. ## Server Log -The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively. - -Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. If all the logs are stored together, some important information may be missed very easily and so on the server side, important information is stored in a different place from other logs. +The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively. -- The log at level of INFO, WARNING and ERROR is stored in `taosinfo` so that it is easy to find important information -- The log at level of DEBUG (135) and TRACE (143) and other information not handled by `taosinfo` are stored in `taosdlog` +Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. Ensure that the disk drive on which logs are stored has sufficient space. ## Client Log -An independent log file, named as "taoslog+" is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded. +An independent log file, named as "taoslog+" is generated for each client program, i.e. a client process. The parameter `debugFlag` is used to control the log level. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively. + +The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded. The maximum length of a single log file is controlled by parameter `numOfLogLines` and only 2 log files are kept for each `taosd` server process. -Log files are written in an async way to minimize the workload on disk, but the trade off for performance is that a few log lines may be lost in some extreme conditions. +Log files are written in an async way to minimize the workload on disk, but the trade off for performance is that a few log lines may be lost in some extreme conditions. You can configure asynclog to 0 when needed for troubleshooting purposes to ensure that no log information is lost. diff --git a/docs/en/14-reference/02-rest-api/02-rest-api.mdx b/docs/en/14-reference/02-rest-api/02-rest-api.mdx index fe18349a6dae3ad44772b4a30a2c3d4ad75b0f47..ce28ee87d9317487d5c610d23287775be6b753ec 100644 --- a/docs/en/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs/en/14-reference/02-rest-api/02-rest-api.mdx @@ -2,15 +2,15 @@ title: REST API --- -To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles; namely REST API. To minimize the learning cost, unlike REST APIs for other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request, to operate the database. +To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles; namely REST API. To minimize the learning cost, unlike REST APIs for other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request, to operate the database. -:::note -One difference from the native connector is that the REST interface is stateless and so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name in the prefix. (Since version 2.2.0.0, TDengine supports specification of the db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, REST service is provided by taosAdapter by default and it requires that the `db_name` must be specified in the URL.) +:::note +One difference from the native connector is that the REST interface is stateless and so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name in the prefix. TDengine supports specification of the db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. ::: ## Installation -The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol. +The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol. The REST interface is provided by [taosAdapter](../taosadapter), to use REST interface you need to make sure `taosAdapter` is running properly. ## Verification @@ -18,86 +18,77 @@ If the TDengine server is already installed, it can be verified as follows: The following example is in an Ubuntu environment and uses the `curl` tool to verify that the REST interface is working. Note that the `curl` tool may need to be installed in your environment. -The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number. +The following example lists all databases on the host h1.tdengine.com. To use it in your environment, replace `h1.tdengine.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number. -```html -curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.taosdata.com:6041/rest/sql +```bash +curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \ + -d "select name, ntables, status from information_schema.ins_databases;" \ + h1.tdengine.com:6041/rest/sql ``` The following return value results indicate that the verification passed. ```json { - "status": "succ", - "head": [ - "name", - "created_time", - "ntables", - "vgroups", - "replica", - "quorum", - "days", - "keep1,keep2,keep(D)", - "cache(MB)", - "blocks", - "minrows", - "maxrows", - "wallevel", - "fsync", - "comp", - "precision", - "status" - ], - "data": [ - [ - "log", - "2020-09-02 17:23:00.039", - 4, - 1, - 1, - 1, - 10, - "30,30,30", - 1, - 3, - 100, - 4096, - 1, - 3000, - 2, - "us", - "ready" - ] - ], - "rows": 1 + "code": 0, + "column_meta": [ + [ + "name", + "VARCHAR", + 64 + ], + [ + "ntables", + "BIGINT", + 8 + ], + [ + "status", + "VARCHAR", + 10 + ] + ], + "data": [ + [ + "information_schema", + 16, + "ready" + ], + [ + "performance_schema", + 9, + "ready" + ] + ], + "rows": 2 } ``` ## HTTP request URL format -``` +```text http://:/rest/sql/[db_name] ``` Parameter Description: -- fqnd: FQDN or IP address of any host in the cluster -- port: httpPort configuration item in the configuration file, default is 6041 -- db_name: Optional parameter that specifies the default database name for the executed SQL command. (supported since version 2.2.0.0) +- fqnd: FQDN or IP address of any host in the cluster. +- port: httpPort configuration item in the configuration file, default is 6041. +- db_name: Optional parameter that specifies the default database name for the executed SQL command. For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:6041` and sets the default database name to `test`. TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication. -- The custom authentication information is as follows. More details about "token" later. +- authentication information is shown below: - ``` + ```text Authorization: Taosd ``` -- Basic authentication information is shown below +- Basic authentication information is shown below: - ``` + ```text Authorization: Basic ``` @@ -109,51 +100,148 @@ Use `curl` to initiate an HTTP request with a custom authentication method, with curl -L -H "Authorization: Basic " -d "" :/rest/sql/[db_name] ``` -Or +or ```bash curl -L -u username:password -d "" :/rest/sql/[db_name] ``` -where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.g. `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`. +where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.g. `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`.. ## HTTP Return Format -The return result is in JSON format, as follows: +### HTTP Response Code + +| **Response Code** | **Description** | +|-------------------|----------------| +| 200 | Success. (Also used for C interface errors.) | +| 400 | Parameter error | +| 401 | Authentication failure | +| 404 | Interface not found | +| 500 | Internal error | +| 503 | Insufficient system resources | + +### HTTP body structure + +#### Successful Operation + +Example: ```json { - "status": "succ", - "head": ["ts", "current", ...], - "column_meta": [["ts",9,8],["current",6,4], ...], - "data": [ - ["2018-10-03 14:38:05.000", 10.3, ...], - ["2018-10-03 14:38:15.000", 12.6, ...] + "code": 0, + "column_meta": [["affected_rows", "INT", 4]], + "data": [[0]], + "rows": 1 +} +``` + +Description: + +- code: (`int`) 0 indicates success. +- column_meta: (`[1][3]any`) Only returns `[["affected_rows", "INT", 4]]`. +- rows: (`int`) Only returns `1`. +- data: (`[][]any`) Returns the number of rows affected. + +#### Successful Query + +Example: + +```json +{ + "code": 0, + "column_meta": [ + ["ts", "TIMESTAMP", 8], + ["count", "BIGINT", 8], + ["endpoint", "VARCHAR", 45], + ["status_code", "INT", 4], + ["client_ip", "VARCHAR", 40], + ["request_method", "VARCHAR", 15], + ["request_uri", "VARCHAR", 128] + ], + "data": [ + [ + "2022-06-29T05:50:55.401Z", + 2, + "LAPTOP-NNKFTLTG:6041", + 200, + "172.23.208.1", + "POST", + "/rest/sql" ], - "rows": 2 + [ + "2022-06-29T05:52:16.603Z", + 1, + "LAPTOP-NNKFTLTG:6041", + 200, + "172.23.208.1", + "POST", + "/rest/sql" + ], + [ + "2022-06-29T06:28:14.118Z", + 1, + "LAPTOP-NNKFTLTG:6041", + 200, + "172.23.208.1", + "POST", + "/rest/sql" + ], + [ + "2022-06-29T05:52:16.603Z", + 2, + "LAPTOP-NNKFTLTG:6041", + 401, + "172.23.208.1", + "POST", + "/rest/sql" + ] + ], + "rows": 4 +} +``` + +Description: + +- code: `int` 0 indicates success. +- column_meta: (`[][3]any`) Column information. Each column is described with three values: column name (string), column type (string), and type length (int). +- rows: (`int`) The number of rows returned. +- data: (`[][]any`) + +The following types may be returned: + +- "NULL" +- "BOOL" +- "TINYINT" +- "SMALLINT" +- "INT" +- "BIGINT" +- "FLOAT" +- "DOUBLE" +- "VARCHAR" +- "TIMESTAMP" +- "NCHAR" +- "TINYINT UNSIGNED" +- "SMALLINT UNSIGNED" +- "INT UNSIGNED" +- "BIGINT UNSIGNED" +- "JSON" + +#### Errors + +Example: + +```json +{ + "code": 9728, + "desc": "syntax error near \"1\"" } ``` Description: -- status: tells you whethre the operation result is success or failure. -- head: the definition of the table, or just one column "affected_rows" if no result set is returned. (As of version 2.0.17.0, it is recommended not to rely on the head return value to determine the data column type but rather use column_meta. In later versions, the head item may be removed from the return value.) -- column_meta: this item is added to the return value to indicate the data type of each column in the data with version 2.0.17.0 and later versions. Each column is described by three values: column name, column type, and type length. For example, `["current",6,4]` means that the column name is "current", the column type is 6, which is the float type, and the type length is 4, which is the float type with 4 bytes. If the column type is binary or nchar, the type length indicates the maximum length of content stored in the column, not the length of the specific data in this return value. When the column type is nchar, the type length indicates the number of Unicode characters that can be saved, not bytes. -- data: The exact data returned, presented row by row, or just [[affected_rows]] if no result set is returned. The order of the data columns in each row of data is the same as that of the data columns described in column_meta. -- rows: Indicates how many rows of data there are. - -The column types in column_meta are described as follows: - -- 1:BOOL -- 2:TINYINT -- 3:SMALLINT -- 4:INT -- 5:BIGINT -- 6:FLOAT -- 7:DOUBLE -- 8:BINARY -- 9:TIMESTAMP -- 10:NCHAR +- code: (`int`) Error code. +- desc: (`string`): Error code description. ## Custom Authorization Code @@ -165,11 +253,9 @@ curl http://:/rest/login// Where `fqdn` is the FQDN or IP address of the TDengine database. `port` is the port number of the TDengine service. `username` is the database username. `password` is the database password. The return value is in `JSON` format, and the meaning of each field is as follows. -- status: flag bit of the request result - -- code: return value code - -- desc: authorization code +- status: flag bit of the request result. +- code: return value code. +- desc: authorization code. Example of getting authorization code. @@ -187,7 +273,7 @@ Response body: } ``` -## For example +## Usage examples - query all records from table d1001 of database demo @@ -199,19 +285,44 @@ Response body: ```json { - "status": "succ", - "head": ["ts", "current", "voltage", "phase"], - "column_meta": [ - ["ts", 9, 8], - ["current", 6, 4], - ["voltage", 4, 4], - ["phase", 6, 4] - ], - "data": [ - ["2018-10-03 14:38:05.000", 10.3, 219, 0.31], - ["2018-10-03 14:38:15.000", 12.6, 218, 0.33] - ], - "rows": 2 + "code": 0, + "column_meta": [ + [ + "ts", + "TIMESTAMP", + 8 + ], + [ + "current", + "FLOAT", + 4 + ], + [ + "voltage", + "INT", + 4 + ], + [ + "phase", + "FLOAT", + 4 + ] + ], + "data": [ + [ + "2022-07-30T06:44:40.32Z", + 10.3, + 219, + 0.31 + ], + [ + "2022-07-30T06:44:41.32Z", + 12.6, + 218, + 0.33 + ] + ], + "rows": 2 } ``` @@ -225,83 +336,23 @@ Response body: ```json { - "status": "succ", - "head": ["affected_rows"], - "column_meta": [["affected_rows", 4, 4]], - "data": [[1]], - "rows": 1 + "code": 0, + "column_meta": [ + [ + "affected_rows", + "INT", + 4 + ] + ], + "data": [ + [ + 0 + ] + ], + "rows": 1 } ``` -## Other Uses - -### Unix timestamps for result sets - -When the HTTP request URL uses `/rest/sqlt`, the returned result set's timestamp value will be in Unix timestamp format, for example: - -```bash -curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sqlt -``` - -Response body: - -```json -{ - "status": "succ", - "head": ["ts", "current", "voltage", "phase"], - "column_meta": [ - ["ts", 9, 8], - ["current", 6, 4], - ["voltage", 4, 4], - ["phase", 6, 4] - ], - "data": [ - [1538548685000, 10.3, 219, 0.31], - [1538548695000, 12.6, 218, 0.33] - ], - "rows": 2 -} -``` - -### UTC format for the result set +## Reference -When the HTTP request URL uses `/rest/sqlutc`, the timestamp of the returned result set will be expressed as a UTC format, for example: - -```bash - curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.t1" 192.168.0.1:6041/rest/sqlutc -``` - -Response body: - -```json -{ - "status": "succ", - "head": ["ts", "current", "voltage", "phase"], - "column_meta": [ - ["ts", 9, 8], - ["current", 6, 4], - ["voltage", 4, 4], - ["phase", 6, 4] - ], - "data": [ - ["2018-10-03T14:38:05.000+0800", 10.3, 219, 0.31], - ["2018-10-03T14:38:15.000+0800", 12.6, 218, 0.33] - ], - "rows": 2 -} -``` - -## Important configuration items - -Only some configuration parameters related to the RESTful interface are listed below. Please see the description in the configuration file for other system parameters. - -- The port number of the external RESTful service is bound to 6041 by default (the actual value is serverPort + 11, so it can be changed by modifying the setting of the serverPort parameter). -- httpMaxThreads: the number of threads to start, default is 2 (the default value is rounded down to half of the CPU cores with version 2.0.17.0 and later versions). -- restfulRowLimit: the maximum number of result sets (in JSON format) to return. The default value is 10240. -- httpEnableCompress: whether to support compression, the default is not supported. Currently, TDengine only supports the gzip compression format. -- httpDebugFlag: logging switch, default is 131. 131: error and alarm messages only, 135: debug messages, 143: very detailed debug messages. -- httpDbNameMandatory: users must specify the default database name in the RESTful URL. The default is 0, which turns off this check. If set to 1, users must put a default database name in every RESTful URL. Otherwise, it will return an execution error and reject this SQL statement, regardless of whether the SQL statement executed at this time requires a specified database. - -:::note -If you are using the REST API provided by taosd, you should write the above configuration in taosd's configuration file taos.cfg. If you use the REST API of taosAdapter, you need to refer to taosAdapter [corresponding configuration method](/reference/taosadapter/). -::: +[taosAdapter](/reference/taosadapter/) diff --git a/docs/en/14-reference/03-connector/cpp.mdx b/docs/en/14-reference/03-connector/03-cpp.mdx similarity index 78% rename from docs/en/14-reference/03-connector/cpp.mdx rename to docs/en/14-reference/03-connector/03-cpp.mdx index e0cdf2bf2ce7eed06cacaf71a5b9baf56a3aee2b..02d7df48db540a3eb44379ada7332b2838924212 100644 --- a/docs/en/14-reference/03-connector/cpp.mdx +++ b/docs/en/14-reference/03-connector/03-cpp.mdx @@ -1,10 +1,9 @@ --- -sidebar_position: 1 sidebar_label: C/C++ title: C/C++ Connector --- -C/C++ developers can use TDengine's client driver and the C/C++ connector, to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use the C/C++ connector you must include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs. The application also needs to link to the corresponding dynamic libraries on the platform where it is located. +C/C++ developers can use TDengine's client driver and the C/C++ connector, to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use the C/C++ connector you must include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs. The application also needs to link to the corresponding dynamic libraries on the platform where it is located. ```c #include @@ -12,8 +11,8 @@ C/C++ developers can use TDengine's client driver and the C/C++ connector, to de After TDengine server or client installation, `taos.h` is located at -- Linux: `/usr/local/taos/include` -- Windows: `C:\TDengine\include` +- Linux:`/usr/local/taos/include` +- Windows:`C:\TDengine\include` The dynamic libraries for the TDengine client driver are located in. @@ -28,7 +27,7 @@ Please refer to [list of supported platforms](/reference/connector#supported-pla The version number of the TDengine client driver and the version number of the TDengine server should be same. A lower version of the client driver is compatible with a higher version of the server, if the first three version numbers are the same (i.e., only the fourth version number is different). For e.g. if the client version is x.y.z.1 and the server version is x.y.z.2 the client and server are compatible. But in general we do not recommend using a lower client version with a newer server version. It is also strongly discouraged to use a higher version of the client driver to access a lower version of the TDengine server. -## Installation steps +## Installation Steps Please refer to the [Installation Steps](/reference/connector#installation-steps) for TDengine client driver installation @@ -45,7 +44,7 @@ The following is sample code for establishing a connection, which omits the quer exit(1); } - /* put your code here for query and writing */ + /* put your code here for read and write */ taos_close(taos); taos_cleanup(); @@ -60,7 +59,7 @@ In the above example code, `taos_connect()` establishes a connection to port 603 ::: -## Example program +## Sample program This section shows sample code for standard access methods to TDengine clusters using the client driver. @@ -125,7 +124,7 @@ You can find it in the installation directory under the `examples/c` path. This ::: -## API reference +## API Reference The following describes the basic API, synchronous API, asynchronous API, subscription API, and schemaless write API of TDengine client driver, respectively. @@ -141,10 +140,9 @@ The base API is used to do things like create database connections and provide a Cleans up the runtime environment and should be called before the application exits. -- ` int taos_options(TSDB_OPTION option, const void * arg, ...) ` +- `int taos_options(TSDB_OPTION option, const void * arg, ...)` - Set client options, currently supports region setting (`TSDB_OPTION_LOCALE`), character set -(`TSDB_OPTION_CHARSET`), time zone (`TSDB_OPTION_TIMEZONE`), configuration file path (`TSDB_OPTION_CONFIGDIR`). The region setting, character set, and time zone default to the current settings of the operating system. + Set client options, currently supports region setting (`TSDB_OPTION_LOCALE`), character set (`TSDB_OPTION_CHARSET`), time zone (`TSDB_OPTION_TIMEZONE`), configuration file path (`TSDB_OPTION_CONFIGDIR`). The region setting, character set, and time zone default to the current settings of the operating system. - `char *taos_get_client_info()` @@ -157,7 +155,7 @@ The base API is used to do things like create database connections and provide a - host: FQDN of any node in the TDengine cluster - user: user name - pass: password - - db: the database name. Even if the user does not provide this, the connection will still work correctly. The user can create a new database through this connection. If the user provides the database name, it means that the database has already been created and the connection can be used for regular operations on the database. + - db: the database name. Even if the user does not provide this, the connection will still work correctly. The user can create a new database through this connection. If the user provides the database name, it means that the database has already been created and the connection can be used for regular operations on the database. - port: the port the taosd program is listening on NULL indicates a failure. The application needs to save the returned parameters for subsequent use. @@ -171,7 +169,7 @@ The base API is used to do things like create database connections and provide a Get server-side version information. -- ` int taos_select_db(TAOS *taos, const char *db)` +- `int taos_select_db(TAOS *taos, const char *db)` Set the current default database to `db`. @@ -211,15 +209,15 @@ The APIs described in this subsection are all synchronous interfaces. After bein Get the number of rows affected by the executed SQL statement. -- ` TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)` +- `TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)` Gets the properties of each column of the query result set (column name, column data type, column length), used in conjunction with `taos_num_fields()` to parse a tuple (one row) of data returned by `taos_fetch_row()`. The structure of `TAOS_FIELD` is as follows. ```c typedef struct taosField { - char name[65]; // column name - uint8_t type; // data type - int16_t bytes; // length, in bytes + char name[65]; // column name + uint8_t type; // data type + int16_t bytes; // length, in bytes } TAOS_FIELD; ``` @@ -227,7 +225,7 @@ typedef struct taosField { Stops the execution of the current query. -- ` void taos_free_result(TAOS_RES *res)` +- `void taos_free_result(TAOS_RES *res)` Frees the query result set and the associated resources. Be sure to call this API to free the resources after the query is completed. Failing to call this, may lead to a memory leak in the application. However, note that the application will crash if you call a function like `taos_consume()` to get the query results after freeing the resources. @@ -235,7 +233,7 @@ typedef struct taosField { Get the reason for the failure of the last API call. The return value is an error message identified by a string. -- 'int taos_errno(TAOS_RES *res)` +- `int taos_errno(TAOS_RES *res)` Get the reason for the last API call failure. The return value is the error code. @@ -294,7 +292,7 @@ The specific functions related to the interface are as follows (see also the [pr Creates a TAOS_STMT object for subsequent calls. -- ` int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length)` +- `int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length)` Parse a SQL command, and bind the parsed result and parameter information to `stmt`. If the parameter length is greater than 0, use this parameter as the length of the SQL command. If it is equal to 0, the length of the SQL command will be determined automatically. @@ -332,16 +330,16 @@ The specific functions related to the interface are as follows (see also the [pr ```c typedef struct TAOS_MULTI_BIND { - int buffer_type; - void * buffer; - uintptr_t buffer_length; - uintptr_t * length; - char * is_null; - int num; // the number of columns + int buffer_type; + void * buffer; + uintptr_t buffer_length; + uintptr_t * length; + char * is_null; + int num; // the number of columns } TAOS_MULTI_BIND; ``` -- ` int taos_stmt_add_batch(TAOS_STMT *stmt)` +- `int taos_stmt_add_batch(TAOS_STMT *stmt)` Adds the currently bound parameter to the batch. After calling this function, you can call `taos_stmt_bind_param()` or `taos_stmt_bind_param_batch()` again to bind a new parameter. Note that this function only supports INSERT/IMPORT statements. Other SQL command such as SELECT will return an error. @@ -349,7 +347,7 @@ The specific functions related to the interface are as follows (see also the [pr Execute the prepared statement. Currently, a statement can only be executed once. -- ` TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)` +- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)` Gets the result set of a statement. Use the result set in the same way as in the non-parametric call. When finished, `taos_free_result()` should be called on this result set to free resources. @@ -357,7 +355,7 @@ The specific functions related to the interface are as follows (see also the [pr Finish execution and release all resources. -- ` char * taos_stmt_errstr(TAOS_STMT *stmt)` +- `char * taos_stmt_errstr(TAOS_STMT *stmt)` (Available in 2.1.3.0 and later versions) Used to get error information if other STMT APIs return errors (return error codes or null pointers). @@ -405,46 +403,3 @@ In addition to writing data using the SQL method or the parameter binding API, w **Supported Versions** This feature interface is supported from version 2.3.0.0. - -### Subscription and Consumption API - -The Subscription API currently supports subscribing to one or more tables and continuously fetching the latest data written to them by polling periodically. - -- `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)` - - This function is responsible for starting the subscription service, returning the subscription object on success and `NULL` on failure, with the following parameters. - - - taos: the database connection that has been established. - - restart: if the subscription already exists, whether to restart or continue the previous subscription. - - topic: the topic of the subscription (i.e., the name). This parameter is the unique identifier of the subscription. - - sql: the query statement of the subscription which can only be a _select_ statement. Only the original data should be queried, and data can only be queried in temporal order. - - fp: the callback function when the query result is received only used when called asynchronously. This parameter should be passed `NULL` when called synchronously. The function prototype is described below. - - param: additional parameter when calling the callback function. The system API will pass it to the callback function as is, without any processing. - - interval: polling period in milliseconds. The callback function will be called periodically according to this parameter when called asynchronously. The interval should not be too small to avoid impact on system performance when called synchronously. If the interval between two calls to `taos_consume()` is less than this period, the API will block until the interval exceeds this period. - -- ` typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)` - - The prototype of the callback function in asynchronous mode with the following parameters - - - tsub: subscription object - - res: query result set, note that there may be no records in the result set - - param: additional parameters provided by the client program when calling `taos_subscribe()` - - code: error code - - :::note - The callback function should not take too long to process, especially if the returned result set has a lot of data. Otherwise, it may lead to an abnormal state, such as client blocking. If you must perform complex calculations, we recommend handling them in a separate thread. - - ::: - -- `TAOS_RES *taos_consume(TAOS_SUB *tsub)` - - In synchronous mode, this function is used to fetch the results of a subscription. The user application places it in a loop. If the interval between two calls to `taos_consume()` is less than the polling period of the subscription, the API will block until the interval exceeds this period. If a new record arrives in the database, the API returns that latest record. Otherwise, it returns an empty result set with no records. If the return value is `NULL`, there is a system error. This API should not be called by user programs in asynchronous mode. - - :::note - After calling `taos_consume()`, the user application should make sure to call `taos_fetch_row()` or `taos_fetch_block()` to process the subscription results as soon as possible. Otherwise, the server-side will keep caching the query result data waiting to be read by the client, which in extreme cases will cause the server side to run out of memory and affect the stability of the service. - - ::: - -- `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)` - - Unsubscribe. If the parameter `keepProgress` is not 0, the API will keep the progress information of the subscription, and subsequent calls to `taos_subscribe()` will continue based on this progress; otherwise, the progress information will be deleted, and subsequent readings will have to be restarted. diff --git a/docs/en/14-reference/03-connector/java.mdx b/docs/en/14-reference/03-connector/04-java.mdx similarity index 75% rename from docs/en/14-reference/03-connector/java.mdx rename to docs/en/14-reference/03-connector/04-java.mdx index 22f99bb9ae8fa669155ba8ac7cec1ad2c609cb32..129d90ea85d9455c1ae460b3799b5253dd3a49fc 100644 --- a/docs/en/14-reference/03-connector/java.mdx +++ b/docs/en/14-reference/03-connector/04-java.mdx @@ -1,19 +1,18 @@ --- toc_max_heading_level: 4 -sidebar_position: 2 sidebar_label: Java title: TDengine Java Connector -description: TDengine Java based on JDBC API and provide both native and REST connections +description: The TDengine Java Connector is implemented on the standard JDBC API and provides native and REST connectors. --- import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -'taos-jdbcdriver' is TDengine's official Java language connector, which allows Java developers to develop applications that access the TDengine database. 'taos-jdbcdriver' implements the interface of the JDBC driver standard and provides two forms of connectors. One is to connect to a TDengine instance natively through the TDengine client driver (taosc), which supports functions including data writing, querying, subscription, schemaless writing, and bind interface. And the other is to connect to a TDengine instance through the REST interface provided by taosAdapter (2.4.0.0 and later). The implementation of the REST connection and those of the native connections have slight differences in features. +`taos-jdbcdriver` is the official Java connector for TDengine. Java developers can use it to develop applications that access data in TDengine. `taos-jdbcdriver` implements standard JDBC driver interfaces and two connection methods: One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The second is **REST connection** which is implemented through taosAdapter. The set of features implemented by the REST connection differs slightly from those implemented by the native connection. -![TDengine Database tdengine-connector](tdengine-jdbc-connector.webp) +![TDengine Database Connector Java](tdengine-jdbc-connector.webp) -The preceding diagram shows two ways for a Java app to access TDengine via connector: +The preceding figure shows the two ways in which a Java application can access TDengine. - JDBC native connection: Java applications use TSDBDriver on physical node 1 (pnode1) to call client-driven directly (`libtaos.so` or `taos.dll`) APIs to send writing and query requests to taosd instances located on physical node 2 (pnode2). - JDBC REST connection: The Java application encapsulates the SQL as a REST request via RestfulDriver, sends it to the REST server (taosAdapter) on physical node 2. taosAdapter forwards the request to TDengine server and returns the result. @@ -30,34 +29,34 @@ TDengine's JDBC driver implementation is as consistent as possible with the rela ## Supported platforms -Native connection supports the same platform as TDengine client-driven support. +Native connections are supported on the same platforms as the TDengine client driver. REST connection supports all platforms that can run Java. ## Version support -Please refer to [Version Support List](/reference/connector#version-support). +Please refer to [version support list](/reference/connector#version-support) ## TDengine DataType vs. Java DataType TDengine currently supports timestamp, number, character, Boolean type, and the corresponding type conversion with Java is as follows: -| TDengine DataType | JDBCType (driver version < 2.0.24) | JDBCType (driver version > = 2.0.24) | -| ----------------- | ---------------------------------- | ------------------------------------ | -| TIMESTAMP | java.lang.Long | java.sql.Timestamp | -| INT | java.lang.Integer | java.lang.Integer | -| BIGINT | java.lang.Long | java.lang.Long | -| FLOAT | java.lang.Float | java.lang.Float | -| DOUBLE | java.lang.Double | java.lang.Double | -| SMALLINT | java.lang.Short | java.lang.Short | -| TINYINT | java.lang.Byte | java.lang.Byte | -| BOOL | java.lang.Boolean | java.lang.Boolean | -| BINARY | java.lang.String | byte array | -| NCHAR | java.lang.String | java.lang.String | -| JSON | - | java.lang.String | +| TDengine DataType | JDBCType | +| ----------------- | ---------------------------------- | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | +| SMALLINT | java.lang.Short | +| TINYINT | java.lang.Byte | +| BOOL | java.lang.Boolean | +| BINARY | byte array | +| NCHAR | java.lang.String | +| JSON | java.lang.String | **Note**: Only TAG supports JSON types -## Installation steps +## Installation Steps ### Pre-installation preparation @@ -71,17 +70,19 @@ Before using Java Connector to connect to the database, the following conditions +taos-jdbcdriver has been published on the [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) and synchronized to other major repositories. + - [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) - [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver) - [maven.aliyun](https://maven.aliyun.com/mvn/search) Add following dependency in the `pom.xml` file of your Maven project: -```xml +```xml-dtd com.taosdata.jdbc taos-jdbcdriver - 2.0.** + 3.0.0 ``` @@ -90,26 +91,26 @@ Add following dependency in the `pom.xml` file of your Maven project: You can build Java connector from source code after cloning the TDengine project: -``` -git clone https://github.com/taosdata/taos-connector-jdbc.git --branch 2.0 +```shell +git clone https://github.com/taosdata/taos-connector-jdbc.git cd taos-connector-jdbc mvn clean install -Dmaven.test.skip=true ``` -After compilation, a jar package named taos-jdbcdriver-2.0.XX-dist.jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository. +After you have compiled taos-jdbcdriver, the `taos-jdbcdriver-3.0.*-dist.jar` file is created in the target directory. The compiled JAR file is automatically stored in your local Maven repository. -## Establish a connection +## Establishing a connection TDengine's JDBC URL specification format is: -`jdbc:[TAOS| TAOS-RS]://[host_name]:[port]/[database_name]? [user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` +`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` For establishing connections, native connections differ slightly from REST connections. - + ```java Class.forName("com.taosdata.jdbc.TSDBDriver"); @@ -129,55 +130,50 @@ The configuration parameters in the URL are as follows: - charset: The character set used by the client, the default value is the system character set. - locale: Client locale, by default, use the system's current locale. - timezone: The time zone used by the client, the default value is the system's current time zone. -- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is: false. Enabling batch pulling and obtaining a batch of data can improve query performance when the query data volume is large. +- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is true. Enabling batch pulling and obtaining a batch of data can improve query performance when the query data volume is large. - batchErrorIgnore:true: When executing statement executeBatch, if there is a SQL execution failure in the middle, the following SQL will continue to be executed. false: No more statements after the failed SQL are executed. The default value is: false. -For more information about JDBC native connections, see [Video Tutorial](https://www.taosdata.com/blog/2020/11/11/1955.html). - **Connect using the TDengine client-driven configuration file ** When you use a JDBC native connection to connect to a TDengine cluster, you can use the TDengine client driver configuration file to specify parameters such as `firstEp` and `secondEp` of the cluster in the configuration file as below: 1. Do not specify hostname and port in Java applications. - ```java - public Connection getConn() throws Exception{ - Class.forName("com.taosdata.jdbc.TSDBDriver"); - String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata"; - Properties connProps = new Properties(); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - Connection conn = DriverManager.getConnection(jdbcUrl, connProps); - return conn; - } - ``` +```java +public Connection getConn() throws Exception{ + Class.forName("com.taosdata.jdbc.TSDBDriver"); + String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata"; + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + Connection conn = DriverManager.getConnection(jdbcUrl, connProps); + return conn; +} +``` 2. specify the firstEp and the secondEp in the configuration file taos.cfg - ```shell - # first fully qualified domain name (FQDN) for TDengine system - firstEp cluster_node1:6030 +```shell +# first fully qualified domain name (FQDN) for TDengine system +firstEp cluster_node1:6030 - # second fully qualified domain name (FQDN) for TDengine system, for cluster only - secondEp cluster_node2:6030 +# second fully qualified domain name (FQDN) for TDengine system, for cluster only +secondEp cluster_node2:6030 - # default system charset - # charset UTF-8 +# default system charset +# charset UTF-8 - # system locale - # locale en_US.UTF-8 - ``` +# system locale +# locale en_US.UTF-8 +``` In the above example, JDBC uses the client's configuration file to establish a connection to a hostname `cluster_node1`, port 6030, and a database named `test`. When the firstEp node in the cluster fails, JDBC attempts to connect to the cluster using secondEp. In TDengine, as long as one node in firstEp and secondEp is valid, the connection to the cluster can be established normally. -:::note The configuration file here refers to the configuration file on the machine where the application that calls the JDBC Connector is located, the default path is `/etc/taos/taos.cfg` on Linux, and the default path is `C://TDengine/cfg/taos.cfg` on Windows. -::: - @@ -195,11 +191,11 @@ There is no dependency on the client driver when Using a JDBC REST connection. C 2. jdbcUrl starting with "jdbc:TAOS-RS://". 3. use 6041 as the connection port. -The configuration parameters in the URL are as follows. +The configuration parameters in the URL are as follows: -- user: Login TDengine user name, default value 'root'. -- password: user login password, default value 'taosdata'. -- batchfetch: true: pull the result set in batch when executing the query; false: pull the result set row by row. The default value is false. batchfetch uses HTTP for data transfer. The JDBC REST connection supports bulk data pulling function in taos-jdbcdriver-2.0.38 and TDengine 2.4.0.12 and later versions. taos-jdbcdriver and TDengine transfer data via WebSocket connection. Compared with HTTP, WebSocket enables JDBC REST connection to support large data volume querying and improve query performance. +- user: Log in to the TDengine username. The default value is 'root'. +- password: User login password, the default value is 'taosdata'. +- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is: false. batchfetch uses HTTP for data transfer. JDBC REST supports batch pulls. taos-jdbcdriver and TDengine transfer data via WebSocket connection. Compared with HTTP, WebSocket enables JDBC REST connection to support large data volume querying and improve query performance. - charset: specify the charset to parse the string, this parameter is valid only when set batchfetch to true. - batchErrorIgnore: true: when executing executeBatch of Statement, if one SQL execution fails in the middle, continue to execute the following SQL. false: no longer execute any statement after the failed SQL. The default value is: false. - httpConnectTimeout: REST connection timeout in milliseconds, the default value is 5000 ms. @@ -211,13 +207,13 @@ The configuration parameters in the URL are as follows. :::note -- Unlike the native connection method, the REST interface is stateless. When using the JDBC REST connection, you need to specify the database name of the table and super table in SQL. For example. +- Unlike the native connection method, the REST interface is stateless. When using the JDBC REST connection, you need to specify the database name of the table and super table in SQL. For example: ```sql INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6); ``` -- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections will use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the SQL can be executed: insert into test using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6); +- If the dbname is specified in the URL, the JDBC REST connection uses /rest/sql/dbname as the default URL for RESTful requests. In this case, it is not necessary to specify the dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the SQL can be executed: insert into test using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6); ::: @@ -228,10 +224,10 @@ INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFra In addition to getting the connection from the specified URL, you can use Properties to specify parameters when the connection is established. -**Note**: +Note: - The client parameter set in the application is process-level. If you want to update the parameters of the client, you need to restart the application. This is because the client parameter is a global parameter that takes effect only the first time the application is set. -- The following sample code is based on taos-jdbcdriver-2.0.36. +- The following sample code is based on taos-jdbcdriver-3.0.0. ```java public Connection getConn() throws Exception{ @@ -257,14 +253,14 @@ public Connection getRestConn() throws Exception{ } ``` -In the above example, a connection is established to `taosdemo.com`, port is 6030/6041, and database named `test`. The connection specifies the user name as `root` and the password as `taosdata` in the URL and specifies the character set, language environment, time zone, and whether to enable bulk fetching in the connProps. +In the above example, a connection is established to `taosdemo.com`, port is 6030/6041, and database named `test`. The connection specifies the user name as `root` and the password as `taosdata` in the URL and specifies the character set, language environment, time zone, and whether to enable bulk fetching in the connProps.The url specifies the user name as `root` and the password as `taosdata`. The configuration parameters in properties are as follows. - TSDBDriver.PROPERTY_KEY_USER: login TDengine user name, default value 'root'. - TSDBDriver.PROPERTY_KEY_PASSWORD: user login password, default value 'taosdata'. - TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true: pull the result set in batch when executing query; false: pull the result set row by row. The default value is: false. -- TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true: when executing executeBatch of Statement, if there is a SQL execution failure in the middle, continue to execute the following sq. false: no longer execute any statement after the failed SQL. The default value is: false. +- TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true: when executing executeBatch of Statement, if there is a SQL execution failure in the middle, continue to execute the following sql. false: no longer execute any statement after the failed SQL. The default value is: false. - TSDBDriver.PROPERTY_KEY_CONFIG_DIR: only works when using JDBC native connection. Client configuration file directory path, default value `/etc/taos` on Linux OS, default value `C:/TDengine/cfg` on Windows OS. - TSDBDriver.PROPERTY_KEY_CHARSET: In the character set used by the client, the default value is the system character set. - TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale. @@ -272,7 +268,7 @@ The configuration parameters in properties are as follows. - TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection. - TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection and batchfetch is false. - TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when using JDBC REST connection and batchfetch is true. -- TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL conneciton, false: not using SSL connection. It only takes effect when using using JDBC REST connection. +- TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL conneciton, false: not using SSL connection. It only takes effect when using JDBC REST connection. For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only). ### Priority of configuration parameters @@ -304,7 +300,7 @@ stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int > **Note**: If you do not use `use db` to specify the database, all subsequent operations on the table need to add the database name as a prefix, such as db.tb. -### Insert data +### 插入数据 ```java // insert data @@ -319,7 +315,7 @@ System.out.println("insert " + affectedRows + " rows."); ### Querying data ```java -// query data +// insert data ResultSet resultSet = stmt.executeQuery("select * from tb"); Timestamp ts = null; @@ -354,25 +350,21 @@ try (Statement statement = connection.createStatement()) { } ``` -There are three types of error codes that the JDBC connector can report: - -- Error code of the JDBC driver itself (error code between 0x2301 and 0x2350) -- Error code of the native connection method (error code between 0x2351 and 0x2400) -- Error code of other TDengine function modules +There are three types of error codes that the JDBC connector can report: - Error code of the JDBC driver itself (error code between 0x2301 and 0x2350), - Error code of the native connection method (error code between 0x2351 and 0x2400), and - Error code of other TDengine function modules. For specific error codes, please refer to. - [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) -- [TDengine_ERROR_CODE](https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h) + ### Writing data via parameter binding -TDengine's native JDBC connection implementation has significantly improved its support for data writing (INSERT) scenarios via bind interface with version 2.1.2.0 and later versions. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases. +TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases. -**Note**. +**Note:** - JDBC REST connections do not currently support bind interface -- The following sample code is based on taos-jdbcdriver-2.0.36 +- The following sample code is based on taos-jdbcdriver-3.0.0 - The setString method should be called for binary type data, and the setNString method should be called for nchar type data - both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition @@ -577,7 +569,7 @@ public class ParameterBindingDemo { // set table name pstmt.setTableName("t5_" + i); // set tags - pstmt.setTagNString(0, "California-abc"); + pstmt.setTagNString(0, "California.SanFrancisco"); // set columns ArrayList tsList = new ArrayList<>(); @@ -588,7 +580,7 @@ public class ParameterBindingDemo { ArrayList f1List = new ArrayList<>(); for (int j = 0; j < numOfRow; j++) { - f1List.add("California-abc"); + f1List.add("California.LosAngeles"); } pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE); @@ -635,12 +627,12 @@ public void setNString(int columnIndex, ArrayList list, int size) throws ### Schemaless Writing -Starting with version 2.2.0.0, TDengine has added the ability to perform schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. See [schemaless writing](/reference/schemaless/) for details. +TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless). -**Note**. +Note: - JDBC REST connections do not currently support schemaless writes -- The following sample code is based on taos-jdbcdriver-2.0.36 +- The following sample code is based on taos-jdbcdriver-3.0.0 ```java public class SchemalessInsertTest { @@ -671,59 +663,137 @@ public class SchemalessInsertTest { } ``` -### Subscriptions +### Data Subscription The TDengine Java Connector supports subscription functionality with the following application API. -#### Create subscriptions +#### Create a Topic ```java -TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topicname", "select * from meters", false); +Connection connection = DriverManager.getConnection(url, properties); +Statement statement = connection.createStatement(); +statement.executeUpdate("create topic if not exists topic_speed as select ts, speed from speed_table"); ``` The three parameters of the `subscribe()` method have the following meanings. -- topicname: the name of the subscribed topic. This parameter is the unique identifier of the subscription. -- sql: the query statement of the subscription. This statement can only be a `select` statement. Only original data can be queried, and you can query the data only temporal order. -- restart: if the subscription already exists, whether to restart or continue the previous subscription +- topic_speed: the subscribed topic (name). This is the unique identifier of the subscription. +- sql: the query statement of the subscription which can only be a _select_ statement. Only the original data should be queried, and data can only be queried in temporal order.. + +The preceding example uses the SQL statement `select ts, speed from speed_table` and creates a subscription named `topic_speed`. + +#### Create a Consumer -The above example will use the SQL command `select * from meters` to create a subscription named `topicname`. If the subscription exists, it will continue the progress of the previous query instead of consuming all the data from the beginning. +```java +Properties config = new Properties(); +config.setProperty("enable.auto.commit", "true"); +config.setProperty("group.id", "group1"); +config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer"); + +TaosConsumer consumer = new TaosConsumer<>(config); +``` + +- enable.auto.commit: Specifies whether to commit automatically. +- group.id: consumer: Specifies the group that the consumer is in. +- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set. +- For more information, see [Consumer Parameters](../../../develop/tmq). #### Subscribe to consume data ```java -int total = 0; while(true) { - TSDBResultSet rs = sub.consume(); - int count = 0; - while(rs.next()) { - count++; - } - total += count; - System.out.printf("%d rows consumed, total %d\n", count, total); - Thread.sleep(1000); + ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); + for (ResultBean record : records) { + process(record); + } } ``` -The `consume()` method returns a result set containing all new data from the last `consume()`. Be sure to choose a reasonable frequency for calling `consume()` as needed (e.g. `Thread.sleep(1000)` in the example). Otherwise, it will cause unnecessary stress on the server-side. +`poll` obtains one message each time it is run. #### Close subscriptions ```java -sub.close(true); +// Unsubscribe +consumer.unsubscribe(); +// Close consumer +consumer.close() ``` -The `close()` method closes a subscription. If its argument is `true` it means that the subscription progress information is retained, and the subscription with the same name can be created to continue consuming data; if it is `false` it does not retain the subscription progress. +For more information, see [Data Subscription](../../../develop/tmq). -### Closing resources +### Usage examples ```java -resultSet.close(); -stmt.close(); -conn.close(); -``` +public abstract class ConsumerLoop { + private final TaosConsumer consumer; + private final List topics; + private final AtomicBoolean shutdown; + private final CountDownLatch shutdownLatch; + + public ConsumerLoop() throws SQLException { + Properties config = new Properties(); + config.setProperty("msg.with.table.name", "true"); + config.setProperty("enable.auto.commit", "true"); + config.setProperty("group.id", "group1"); + config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer"); + + this.consumer = new TaosConsumer<>(config); + this.topics = Collections.singletonList("topic_speed"); + this.shutdown = new AtomicBoolean(false); + this.shutdownLatch = new CountDownLatch(1); + } -> **Be sure to close the connection**, otherwise, there will be a connection leak. + public abstract void process(ResultBean result); + + public void pollData() throws SQLException { + try { + consumer.subscribe(topics); + + while (!shutdown.get()) { + ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); + for (ResultBean record : records) { + process(record); + } + } + consumer.unsubscribe(); + } finally { + consumer.close(); + shutdownLatch.countDown(); + } + } + + public void shutdown() throws InterruptedException { + shutdown.set(true); + shutdownLatch.await(); + } + + public static class ResultDeserializer extends ReferenceDeserializer { + + } + + public static class ResultBean { + private Timestamp ts; + private int speed; + + public Timestamp getTs() { + return ts; + } + + public void setTs(Timestamp ts) { + this.ts = ts; + } + + public int getSpeed() { + return speed; + } + + public void setSpeed(int speed) { + this.speed = speed; + } + } +} +``` ### Use with connection pool @@ -754,7 +824,7 @@ Example usage is as follows. //query or insert // ... - connection.close(); // put back to connection pool + connection.close(); // put back to conneciton pool } ``` @@ -786,26 +856,12 @@ public static void main(String[] args) throws Exception { //query or insert // ... - connection.close(); // put back to connection pool + connection.close(); // put back to conneciton pool } ``` > For more questions about using druid, please see [Official Instructions](https://github.com/alibaba/druid). -**Caution:** - -- TDengine `v1.6.4.1` provides a special function `select server_status()` for heartbeat detection, so it is recommended to use `select server_status()` for Validation Query when using connection pooling. - -As you can see below, `select server_status()` returns `1` on successful execution. - -```sql -taos> select server_status(); -server_status()| -================ -1 | -Query OK, 1 row(s) in set (0.000141s) -``` - ### More sample programs The source code of the sample application is under `TDengine/examples/JDBC`: @@ -816,12 +872,13 @@ The source code of the sample application is under `TDengine/examples/JDBC`: - SpringJdbcTemplate: using taos-jdbcdriver in Spring JdbcTemplate. - mybatisplus-demo: using taos-jdbcdriver in Springboot + Mybatis. -Please refer to: [JDBC example](https://github.com/taosdata/TDengine/tree/develop/examples/JDBC) +[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC) ## Recent update logs | taos-jdbcdriver version | major changes | | :---------------------: | :--------------------------------------------: | +| 3.0.0 | Support for TDengine 3.0 | | 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters | | 2.0.38 | JDBC REST connections add bulk pull function | | 2.0.37 | Support json tags | @@ -841,13 +898,19 @@ Please refer to: [JDBC example](https://github.com/taosdata/TDengine/tree/develo **Solution**: On Windows you can copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32` directory, on Linux the following soft link will be created `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` will work. -3. java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on an IA 32-bit platform +3. java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform **Cause**: Currently, TDengine only supports 64-bit JDK. - **Solution**: Reinstall the 64-bit JDK. 4. + **Solution**: Reinstall the 64-bit JDK. + +4. java.lang.NoSuchMethodError: setByteArray + + **Cause**: taos-jbdcdriver 3.* only supports TDengine 3.0 and later. + + **Solution**: Use taos-jdbcdriver 2.* with your TDengine 2.* deployment. -For other questions, please refer to [FAQ](/train-faq/faq) +For additional troubleshooting, see [FAQ](../../../train-faq/faq). ## API Reference diff --git a/docs/en/14-reference/03-connector/go.mdx b/docs/en/14-reference/03-connector/05-go.mdx similarity index 80% rename from docs/en/14-reference/03-connector/go.mdx rename to docs/en/14-reference/03-connector/05-go.mdx index 8a05f2d841bbcdbab2bdb7471691ca0ae49a4f6b..00e3bc1bc38bb7433e823642202957abbdf5566c 100644 --- a/docs/en/14-reference/03-connector/go.mdx +++ b/docs/en/14-reference/03-connector/05-go.mdx @@ -1,6 +1,5 @@ --- toc_max_heading_level: 4 -sidebar_position: 4 sidebar_label: Go title: TDengine Go Connector --- @@ -8,7 +7,7 @@ title: TDengine Go Connector import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -import Preparation from "./_preparation.mdx" +import Preparition from "./_preparition.mdx" import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx" import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx" import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx" @@ -23,7 +22,7 @@ This article describes how to install `driver-go` and connect to TDengine cluste The source code of `driver-go` is hosted on [GitHub](https://github.com/taosdata/driver-go). -## Supported Platforms +## Supported platforms Native connections are supported on the same platforms as the TDengine client driver. REST connections are supported on all platforms that can run Go. @@ -41,33 +40,31 @@ A "native connection" is established by the connector directly to the TDengine i * Normal queries * Continuous queries * Subscriptions -* schemaless interface -* parameter binding interface +* Schemaless interface +* Parameter binding interface ### REST connection A "REST connection" is a connection between the application and the TDengine instance via the REST API provided by the taosAdapter component. The following features are supported: -* General queries +* Normal queries * Continuous queries -## Installation steps +## Installation Steps -### Pre-installation +### Pre-installation preparation -- Install Go development environment (Go 1.14 and above, GCC 4.8.5 and above) +* Install Go development environment (Go 1.14 and above, GCC 4.8.5 and above) - If you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector/#install-client-driver) for specific steps Configure the environment variables and check the command. -* `go env` -* `gcc -v` +* ```go env``` +* ```gcc -v``` ### Use go get to install -``` -go get -u github.com/taosdata/driver-go/v2@develop -``` +`go get -u github.com/taosdata/driver-go/v3@latest` ### Manage with go mod @@ -75,14 +72,14 @@ go get -u github.com/taosdata/driver-go/v2@develop ```text go mod init taos-demo - ``` + ``` 2. Introduce taosSql ```go import ( "database/sql" - _ "github.com/taosdata/driver-go/v2/taosSql" + _ "github.com/taosdata/driver-go/v3/taosSql" ) ``` @@ -90,7 +87,7 @@ go get -u github.com/taosdata/driver-go/v2@develop ```text go mod tidy - ``` + ``` 4. Run the program with `go run taos-demo` or compile the binary with the `go build` command. @@ -99,14 +96,14 @@ go get -u github.com/taosdata/driver-go/v2@develop go build ``` -## Create a connection +## Establishing a connection ### Data source name (DSN) Data source names have a standard format, e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but no type prefix (square brackets indicate optionally): ``` text -[username[:password]@][protocol[(address)]]/[dbname][?param1=value1&... ¶mN=valueN] +[username[:password]@][protocol[(address)]]/[dbname][?param1=value1&...¶mN=valueN] ``` DSN in full form. @@ -114,7 +111,6 @@ DSN in full form. ```text username:password@protocol(address)/dbname?param=value ``` - ### Connecting via connector @@ -126,7 +122,7 @@ Use `taosSql` as `driverName` and use a correct [DSN](#DSN) as `dataSourceName`, * configPath specifies the `taos.cfg` directory -Example. +For example: ```go package main @@ -135,13 +131,13 @@ import ( "database/sql" "fmt" - _ "github.com/taosdata/driver-go/v2/taosSql" + _ "github.com/taosdata/driver-go/v3/taosSql" ) func main() { var taosUri = "root:taosdata@tcp(localhost:6030)/" taos, err := sql.Open("taosSql", taosUri) - if err ! = nil { + if err != nil { fmt.Println("failed to connect TDengine, err:", err) return } @@ -158,7 +154,7 @@ Use `taosRestful` as `driverName` and use a correct [DSN](#DSN) as `dataSourceNa * `disableCompression` whether to accept compressed data, default is true do not accept compressed data, set to false if transferring data using gzip compression. * `readBufferSize` The default size of the buffer for reading data is 4K (4096), which can be adjusted upwards when the query result has a lot of data. -Example. +For example: ```go package main @@ -167,13 +163,13 @@ import ( "database/sql" "fmt" - _ "github.com/taosdata/driver-go/v2/taosRestful" + _ "github.com/taosdata/driver-go/v3/taosRestful" ) func main() { var taosUri = "root:taosdata@http(localhost:6041)/" taos, err := sql.Open("taosRestful", taosUri) - if err ! = nil { + if err != nil { fmt.Println("failed to connect TDengine, err:", err) return } @@ -208,14 +204,14 @@ func main() { ### More sample programs -* [sample program](https://github.com/taosdata/TDengine/tree/develop/examples/go) -* [Video tutorial](https://www.taosdata.com/blog/2020/11/11/1951.html). +* [sample program](https://github.com/taosdata/driver-go/tree/3.0/examples) + ## Usage limitations Since the REST interface is stateless, the `use db` syntax will not work. You need to put the db name into the SQL command, e.g. `create table if not exists tb1 (ts timestamp, a int)` to `create table if not exists test.tb1 (ts timestamp, a int)` otherwise it will report the error `[0x217] Database not specified or available`. -You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. This method is supported by taosAdapter since TDengine 2.4.0.5. Executing the `create database` statement when the specified db does not exist will not report an error while executing other queries or writing against that db will report an error. +You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. Executing the `create database` statement when the specified db does not exist will not report an error while executing other queries or writing against that db will report an error. The complete example is as follows. @@ -227,7 +223,7 @@ import ( "fmt" "time" - _ "github.com/taosdata/driver-go/v2/taosRestful" + _ "github.com/taosdata/driver-go/v3/taosRestful" ) func main() { @@ -269,35 +265,27 @@ func main() { ## Frequently Asked Questions -1. Cannot find the package `github.com/taosdata/driver-go/v2/taosRestful` - - Change the `github.com/taosdata/driver-go/v2` line in the require block of the `go.mod` file to `github.com/taosdata/driver-go/v2 develop`, then execute `go mod tidy`. - -2. bind interface in database/sql crashes +1. bind interface in database/sql crashes REST does not support parameter binding related interface. It is recommended to use `db.Exec` and `db.Query`. -3. error `[0x217] Database not specified or available` after executing other statements with `use db` statement +2. error `[0x217] Database not specified or available` after executing other statements with `use db` statement The execution of SQL command in the REST interface is not contextual, so using `use db` statement will not work, see the usage restrictions section above. -4. use `taosSql` without error but use `taosRestful` with error `[0x217] Database not specified or available` +3. use `taosSql` without error but use `taosRestful` with error `[0x217] Database not specified or available` Because the REST interface is stateless, using the `use db` statement will not take effect. See the usage restrictions section above. -5. Upgrade `github.com/taosdata/driver-go/v2/taosRestful` - - Change the `github.com/taosdata/driver-go/v2` line in the `go.mod` file to `github.com/taosdata/driver-go/v2 develop`, then execute `go mod tidy`. - -6. `readBufferSize` parameter has no significant effect after being increased +4. `readBufferSize` parameter has no significant effect after being increased Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance. -7. `disableCompression` parameter is set to `false` when the query efficiency is reduced +5. `disableCompression` parameter is set to `false` when the query efficiency is reduced When set `disableCompression` parameter to `false`, the query result will be compressed by `gzip` and then transmitted, so you have to decompress the data by `gzip` after getting it. -8. `go get` command can't get the package, or timeout to get the package +6. `go get` command can't get the package, or timeout to get the package Set Go proxy `go env -w GOPROXY=https://goproxy.cn,direct`. @@ -311,14 +299,13 @@ func main() { :::info This API is created successfully without checking permissions, but only when you execute a Query or Exec, and check if user/password/host/port is legal. - ::: -* `func (db *DB) Exec(query string, args . .interface{}) (Result, error)` +* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` `sql.Open` built-in method to execute non-query related SQL. -* `func (db *DB) Query(query string, args ... . interface{}) (*Rows, error)` +* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` `sql.Open` Built-in method to execute query statements. @@ -338,17 +325,33 @@ The `af` package encapsulates TDengine advanced functions such as connection man #### Subscribe to -* `func (conn *Connector) Subscribe(restart bool, topic string, sql string, interval time.Duration) (Subscriber, error)` +* `func NewConsumer(conf *Config) (*Consumer, error)` + +Creates consumer group. + +* `func (c *Consumer) Subscribe(topics []string) error` + +Subscribes to a topic. + +* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)` - Subscribe to data. +Polling information. -* `func (s *taosSubscriber) Consume() (driver.Rows, error)` +* `func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error` - Consume the subscription data, returning the `Rows` structure of the `database/sql/driver` package. +Commit information. -* `func (s *taosSubscriber) Unsubscribe(keepProgress bool)` +* `func (c *Consumer) FreeMessage(message unsafe.Pointer)` - Unsubscribe from data. +Free information. + +* `func (c *Consumer) Unsubscribe() error` + +Unsubscribe. + +* `func (c *Consumer) Close() error` + +Close consumer. #### schemaless @@ -370,11 +373,7 @@ The `af` package encapsulates TDengine advanced functions such as connection man Parameter bound single row insert. -* `func (conn *Connector) StmtQuery(sql string, params *param.Param) (rows driver.Rows, err error)` - - Parameter bound query that returns the `Rows` structure of the `database/sql/driver` package. - -* `func (conn *Connector) InsertStmt() *insertstmt. +* `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt` Initialize the parameters. @@ -412,4 +411,4 @@ The `af` package encapsulates TDengine advanced functions such as connection man ## API Reference -Full API see [driver-go documentation](https://pkg.go.dev/github.com/taosdata/driver-go/v2) +Full API see [driver-go documentation](https://pkg.go.dev/github.com/taosdata/driver-go/v3) diff --git a/docs/en/14-reference/03-connector/rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx similarity index 57% rename from docs/en/14-reference/03-connector/rust.mdx rename to docs/en/14-reference/03-connector/06-rust.mdx index ab06f72069e29361a033f724308d950afe6e8d42..1184c98a287cac8a214aff67ace01f7a836940e5 100644 --- a/docs/en/14-reference/03-connector/rust.mdx +++ b/docs/en/14-reference/03-connector/06-rust.mdx @@ -1,6 +1,5 @@ --- toc_max_heading_level: 4 -sidebar_position: 5 sidebar_label: Rust title: TDengine Rust Connector --- @@ -8,43 +7,45 @@ title: TDengine Rust Connector import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -import Preparation from "./_preparation.mdx" +import Preparition from "./_preparition.mdx" import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx" import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx" import RustQuery from "../../07-develop/04-query-data/_rust.mdx" -[`taos`][taos] is the official Rust language connector for TDengine. Rust developers can develop applications to access the TDengine instance data. +[![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos) -Rust connector provides two ways to establish connections. One is the **Native Connection**, which connects to TDengine instances via the TDengine client driver (taosc). The other is **Websocket connection**, which connects to TDengine instances via taosAdapter service. +`taos` is the official Rust connector for TDengine. Rust developers can develop applications to access the TDengine instance data. -The source code is hosted on [taosdata/taos-connector-rust](https://github.com/taosdata/taos-connector-rust). +`taos` provides two ways to establish connections. One is the **Native Connection**, which connects to TDengine instances via the TDengine client driver (taosc). The other is the **WebSocket connection**, which connects to TDengine instances via the WebSocket interface provided by taosAdapter. You can specify a connection type with Cargo features. By default, both types are supported. The Websocket connection can be used on any platform. The native connection can be used on any platform that the TDengine Client supports. + +The source code for the Rust connectors is located on [GitHub](https://github.com/taosdata/taos-connector-rust). ## Supported platforms -The platforms supported by native connections are the same as those supported by the TDengine client driver. -REST connections are supported on all platforms that can run Rust. +Native connections are supported on the same platforms as the TDengine client driver. +Websocket connections are supported on all platforms that can run Go. ## Version support -Please refer to [version support list](/reference/connector#version-support). +Please refer to [version support list](/reference/connector#version-support) The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues. ## Installation -### Pre-installation +### Pre-installation preparation * Install the Rust development toolchain * If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver) -### Add dependencies +# Add taos dependency -Add the dependency to the [Rust](https://rust-lang.org) project as follows, depending on the connection method selected. +Depending on the connection method, add the [taos][taos] dependency in your Rust project as follows: - + -Add [taos] to the `Cargo.toml` file. +In `cargo.toml`, add [taos][taos]: ```toml [dependencies] @@ -53,9 +54,10 @@ taos = "*" ``` - -Add [taos] to the `Cargo.toml` file. + + +In `cargo.toml`, add [taos][taos] and enable the native feature: ```toml [dependencies] @@ -65,7 +67,7 @@ taos = { version = "*", default-features = false, features = ["native"] } -Add [taos] to the `Cargo.toml` file and enable the `ws` feature. +In `cargo.toml`, add [taos][taos] and enable the ws feature: ```toml [dependencies] @@ -75,15 +77,15 @@ taos = { version = "*", default-features = false, features = ["ws"] } -## Create a connection +## Establishing a connection -In rust connector, we use a DSN connection string as a connection builder. For example, +[TaosBuilder] creates a connection constructor through the DSN connection description string. ```rust let builder = TaosBuilder::from_dsn("taos://")?; ``` -You can now use connection client to create the connection. +You can now use this object to create the connection. ```rust let conn = builder.build()?; @@ -96,9 +98,7 @@ let conn1 = builder.build()?; let conn2 = builder.build()?; ``` -DSN is short for **D**ata **S**ource **N**ame string - [a data structure used to describe a connection to a data source](https://en.wikipedia.org/wiki/Data_source_name). - -A common DSN is basically constructed as this: +The structure of the DSN description string is as follows: ```text [+]://[[:@]:][/][?=[&=]] @@ -106,31 +106,31 @@ A common DSN is basically constructed as this: |driver| protocol | | username | password | host | port | database | params | ``` -- **Driver**: the main entrypoint to a processer. **Required**. In Rust connector, the supported driver names are listed here: - - **taos**: the legacy TDengine connection data source. - - **tmq**: subscription data source from TDengine. - - **http/ws**: use websocket protocol via `ws://` scheme. - - **https/wss**: use websocket protocol via `wss://` scheme. -- **Protocol**: the additional information appended to driver, which can be be used to support different kind of data sources. By default, leave it empty for native driver(only under feature "native"), and `ws/wss` for websocket driver (only under feature "ws"). **Optional**. -- **Username**: as its definition, is the username to the connection. **Optional**. -- **Password**: the password of the username. **Optional**. -- **Host**: address host to the datasource. **Optional**. -- **Port**: address port to the datasource. **Optional**. -- **Database**: database name or collection name in the datasource. **Optional**. -- **Params**: a key-value map for any other informations to the datasource. **Optional**. - -Here is a simple DSN connection string example: +The parameters are described as follows: + +- **driver**: Specify a driver name so that the connector can choose which method to use to establish the connection. Supported driver names are as follows: + - **taos**: Table names use the TDengine connector driver. + - **tmq**: Use the TMQ to subscribe to data. + - **http/ws**: Use Websocket to establish connections. + - **https/wss**: Use Websocket to establish connections, and enable SSL/TLS. +- **protocol**: Specify which connection method to use. For example, `taos+ws://localhost:6041` uses Websocket to establish connections. +- **username/password**: Username and password used to create connections. +- **host/port**: Specifies the server and port to establish a connection. If you do not specify a hostname or port, native connections default to `localhost:6030` and Websocket connections default to `localhost:6041`. +- **database**: Specify the default database to connect to. +- **params**:Optional parameters. + +A sample DSN description string is as follows: ```text taos+ws://localhost:6041/test ``` -which means connect `localhost` with port `6041` via `ws` protocol, and make `test` as the default database. +This indicates that the Websocket connection method is used on port 6041 to connect to the server localhost and use the database `test` by default. -So that you can use DSN to specify connection protocol at runtime: +You can create DSNs to connect to servers in your environment. ```rust -use taos::*; // use it like a `prelude` mod, we need some traits at next. +use taos::*; // use native protocol. let builder = TaosBuilder::from_dsn("taos://localhost:6030")?; @@ -140,7 +140,7 @@ let conn1 = builder.build(); let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; ``` -After connected, you can perform the following operations on the database. +After the connection is established, you can perform operations on your database. ```rust async fn demo(taos: &Taos, db: &str) -> Result<(), Error> { @@ -179,7 +179,7 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> { } ``` -Rust connector provides two kinds of ways to fetch data: +There are two ways to query data: Using built-in types or the [serde](https://serde.rs) deserialization framework. ```rust // Query option 1, use rows stream. @@ -225,41 +225,41 @@ Rust connector provides two kinds of ways to fetch data: -#### Stmt bind +#### STMT Write ### Query data -| + ## API Reference -### Connector builder +### Connector Constructor -Use DSN to directly construct a TaosBuilder object. +You create a connector constructor by using a DSN. ```rust -let builder = TaosBuilder::from_dsn("")? ; +let cfg = TaosBuilder::default().build()?; ``` -Use `builder` to create many connections: +You use the builder object to create multiple connections. ```rust let conn: Taos = cfg.build(); ``` -### Connection pool +### Connection pooling -In complex applications, we recommend enabling connection pools. Connection pool for [taos] is implemented using [r2d2] by enabling "r2d2" feature. +In complex applications, we recommend enabling connection pools. [taos] implements connection pools based on [r2d2]. -Basically, a connection pool with default parameters can be generated as: +As follows, a connection pool with default parameters can be generated. ```rust let pool = TaosBuilder::from_dsn(dsn)?.pool()?; ``` -You can set the connection pool parameters using the `PoolBuilder`. +You can set the same connection pool parameters using the connection pool's constructor. ```rust let dsn = "taos://localhost:6030"; @@ -273,23 +273,23 @@ let opts = PoolBuilder::new() let pool = TaosBuilder::from_dsn(dsn)?.with_pool_builder(opts)?; ``` -In the application code, use `pool.get()?` to get a connection object [Taos]. +In the application code, use `pool.get()? ` to get a connection object [Taos]. ```rust -let taos = pool.get()? ; +let taos = pool.get()?; ``` -### Connection methods +# Connectors -The [Taos] connection struct provides several APIs for convenient use. +The [Taos][struct.Taos] object provides an API to perform operations on multiple databases. -1. `exec`: Execute some non-query SQL statements, such as `CREATE`, `ALTER`, `INSERT` etc. and return affected rows (only meaningful to `INSERT`). +1. `exec`: Execute some non-query SQL statements, such as `CREATE`, `ALTER`, `INSERT`, etc. ```rust let affected_rows = taos.exec("INSERT INTO tb1 VALUES(now, NULL)").await?; ``` -2. `exec_many`: You can execute many SQL statements in order with `exec_many` method. +2. `exec_many`: Run multiple SQL statements simultaneously or in order. ```rust taos.exec_many([ @@ -299,15 +299,15 @@ The [Taos] connection struct provides several APIs for convenient use. ]).await?; ``` -3. `query`: Execute the query statement and return the [ResultSet] object. +3. `query`: Run a query statement and return a [ResultSet] object. ```rust - let mut q = taos.query("select * from log.logs").await? + let mut q = taos.query("select * from log.logs").await?; ``` - The [ResultSet] object stores the query result data and basic information about the returned columns (column name, type, length). + The [ResultSet] object stores query result data and the names, types, and lengths of returned columns - Get filed information with `fields` method. + You can obtain column information by using [.fields()]. ```rust let cols = q.fields(); @@ -316,7 +316,7 @@ The [Taos] connection struct provides several APIs for convenient use. } ``` - Users could fetch data by rows. + It fetches data line by line. ```rust let mut rows = result.rows(); @@ -332,7 +332,7 @@ The [Taos] connection struct provides several APIs for convenient use. } ``` - Or use it with [serde](https://serde.rs) deserialization. + Or use the [serde](https://serde.rs) deserialization framework. ```rust #[derive(Debug, Deserialize)] @@ -359,15 +359,17 @@ The [Taos] connection struct provides several APIs for convenient use. Note that Rust asynchronous functions and an asynchronous runtime are required. -[Taos] provides a few Rust methods that encapsulate SQL to reduce the frequency of `format!` code blocks. +[Taos][struct.Taos] provides Rust methods for some SQL statements to reduce the number of `format!`s. - `.describe(table: &str)`: Executes `DESCRIBE` and returns a Rust data structure. - `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement. - `.use_database(database: &str)`: Executes the `USE` statement. -### Bind API +In addition, this structure is also the entry point for [Parameter Binding](#Parameter Binding Interface) and [Line Protocol Interface](#Line Protocol Interface). Please refer to the specific API descriptions for usage. + +### Bind Interface -Similar to the C interface, Rust provides the bind interface's wrapping. First, create a bind object [Stmt] for a SQL command with the [Taos] object. +Similar to the C interface, Rust provides the bind interface's wrapping. First, the [Taos][struct.taos] object creates a parameter binding object [Stmt] for an SQL statement. ```rust let mut stmt = Stmt::init(&taos).await?; @@ -387,17 +389,17 @@ stmt.set_tbname("d0")?; #### `.set_tags(&[tag])` -Bind tag values when the SQL statement uses a super table. +Bind sub-table table names and tag values when the SQL statement uses a super table. ```rust let mut stmt = taos.stmt("insert into ? using stb0 tags(?) values(? ,?)")?; stmt.set_tbname("d0")?; -stmt.set_tags(&[Value::VarChar("涛思".to_string())])?; +stmt.set_tags(&[Value::VarChar("taos".to_string())])?; ``` #### `.bind(&[column])` -Bind value types. Use the [ColumnView] structure to construct the desired type and bind. +Bind value types. Use the [ColumnView] structure to create and bind the required types. ```rust let params = vec![ @@ -421,42 +423,42 @@ let rows = stmt.bind(¶ms)?.add_batch()?.execute()?; #### `.execute()` -Execute to insert all bind records. [Stmt] objects can be reused, re-bind, and executed after execution. Remember to call `add_batch` before `execute`. +Execute SQL. [Stmt] objects can be reused, re-binded, and executed after execution. Before execution, ensure that all data has been added to the queue with `.add_batch`. ```rust -stmt.add_batch()?.execute()?; +stmt.execute()?; // next bind cycle. -// stmt.set_tbname()? ; -//stmt.bind()? ; -//stmt.add_batch().execute()? ; +//stmt.set_tbname()?; +//stmt.bind()?; +//stmt.execute()?; ``` -A runnable example for bind can be found [here](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs). +For a working example, see [GitHub](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs). -### Subscription API +### Subscriptions -Users can subscribe a [TOPIC](../../../taos-sql/tmq/) with TMQ(the TDengine Message Queue) API. +TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/). -Start from a TMQ builder: +You create a TMQ connector by using a DSN. ```rust let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; ``` -Build a consumer: +Create a consumer: ```rust let mut consumer = tmq.build()?; ``` -Subscribe a topic: +A single consumer can subscribe to one or more topics. ```rust consumer.subscribe(["tmq_meters"]).await?; ``` -Consume messages, and commit the offset for each message. +The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed. ```rust { @@ -495,22 +497,21 @@ Unsubscribe: consumer.unsubscribe().await; ``` -In TMQ DSN, you must choose to subscribe with a group id. Also, there's several options could be set: - -- `group.id`: **Required**, a group id is any visible string you set. -- `client.id`: a optional client description string. -- `auto.offset.reset`: choose to subscribe from *earliest* or *latest*, default is *none* which means 'earliest'. -- `enable.auto.commit`: automatically commit with specified time interval. By default - in the recommended way _ you must use `commit` to ensure that you've consumed the messages correctly, otherwise, consumers will received repeated messages when re-subscribe. -- `auto.commit.interval.ms`: the auto commit interval in milliseconds. +The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory. -Check the whole subscription example at [GitHub](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs). +- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis. +- `client.id`: Subscriber client ID. +- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group. +- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential. +- `auto.commit.interval.ms`: Interval for automatic commits. + +For more information, see [GitHub sample file](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs). -Please move to the Rust documentation hosting page for other related structure API usage instructions: . +For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos). -[TDengine]: https://github.com/taosdata/TDengine +[taos]: https://github.com/taosdata/rust-connector-taos [r2d2]: https://crates.io/crates/r2d2 -[Taos]: https://docs.rs/taos/latest/taos/struct.Taos.html -[ResultSet]: https://docs.rs/taos/latest/taos/struct.ResultSet.html -[Value]: https://docs.rs/taos/latest/taos/enum.Value.html -[Stmt]: https://docs.rs/taos/latest/taos/stmt/struct.Stmt.html -[taos]: https://crates.io/crates/taos +[TaosBuilder]: https://docs.rs/taos/latest/taos/struct.TaosBuilder.html +[TaosCfg]: https://docs.rs/taos/latest/taos/struct.TaosCfg.html +[struct.Taos]: https://docs.rs/taos/latest/taos/struct.Taos.html +[Stmt]: https://docs.rs/taos/latest/taos/struct.Stmt.html diff --git a/docs/en/14-reference/03-connector/python.mdx b/docs/en/14-reference/03-connector/07-python.mdx similarity index 89% rename from docs/en/14-reference/03-connector/python.mdx rename to docs/en/14-reference/03-connector/07-python.mdx index 04eb2e57d4455a83b62633ecb988cb64bf837fea..fc95033baada48e7f322a06d00cede16fb827c51 100644 --- a/docs/en/14-reference/03-connector/python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -1,5 +1,4 @@ --- -sidebar_position: 3 sidebar_label: Python title: TDengine Python Connector description: "taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. tasopy wraps both the native and REST interfaces of TDengine, corresponding to the two submodules of tasopy: taos and taosrest. In addition to wrapping the native and REST interfaces, taospy also provides a programming interface that conforms to the Python Data Access Specification (PEP 249), making it easy to integrate taospy with many third-party tools, such as SQLAlchemy and pandas." @@ -8,14 +7,14 @@ description: "taospy is the official Python connector for TDengine. taospy provi import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -`taospy` is the official Python connector for TDengine. `taospy` provides a rich set of APIs that makes it easy for Python applications to access TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively. +`taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively. In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/). The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection". The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python). -## Supported Platforms +## Supported platforms - The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client. - REST connections are supported on all platforms that can run Python. @@ -35,7 +34,6 @@ We recommend using the latest version of `taospy`, regardless of the version of 1. Install Python. Python >= 3.6 is recommended. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it. 2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it. - If you use a native connection, you will also need to [Install Client Driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI. ### Install via pip @@ -80,7 +78,7 @@ pip3 install git+https://github.com/taosdata/taos-connector-python.git -### Installation verification +### Verify @@ -110,10 +108,11 @@ If you have multiple versions of Python on your system, you may have various `pi C:\> pip3 install taospy Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple Requirement already satisfied: taospy in c:\users\username\appdata\local\programs\python\python310\lib\site-packages (2.3.0) +``` ::: -## Establish connection +## Establishing a connection ### Connectivity testing @@ -150,10 +149,19 @@ If the test is successful, it will output the server version information, e.g. ```json { - "status": "succ", - "head": ["server_version()"], - "column_meta": [["server_version()", 8, 8]], - "data": [["2.4.0.16"]], + "code": 0, + "column_meta": [ + [ + "server_version()", + "VARCHAR", + 7 + ] + ], + "data": [ + [ + "3.0.0.0" + ] + ], "rows": 1 } ``` @@ -202,12 +210,12 @@ All arguments to the `connect()` function are optional keyword arguments. The fo - `url`: The URL of taosAdapter REST service. The default is . - `user`: TDengine user name. The default is `root`. - `password`: TDengine user password. The default is `taosdata`. -- `timeout`: HTTP request timeout in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed. +- `timeout`: HTTP request timeout. Enter a value in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed. -## Sample program +## Example program ### Basic Usage @@ -255,12 +263,12 @@ The TaosCursor class uses native connections for write and query operations. In ##### Use of TaosRestCursor class -The ``TaosRestCursor`` class is an implementation of the PEP249 Cursor interface. +The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface. ```python title="Use of TaosRestCursor" {{#include docs/examples/python/connect_rest_examples.py:basic}} ``` -- `cursor.execute` : Used to execute arbitrary SQL statements. +- `cursor.execute`: Used to execute arbitrary SQL statements. - `cursor.rowcount` : For write operations, returns the number of successful rows written. For query operations, returns the number of rows in the result set. - `cursor.description` : Returns the description of the field. Please refer to [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html) for the specific format of the description information. @@ -274,6 +282,8 @@ The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-ap For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html). + + @@ -304,8 +314,7 @@ For a more detailed description of the `sql()` method, please refer to [RestClie | [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | bind_row.py | [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB line protocol writing | | [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | Use JSON type tags | -| [subscribe-async.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-async.py) | Asynchronous subscription | -| [subscribe-sync.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-sync.py) | synchronous-subscribe | +| [tmq.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq.py) | TMQ subscription | ## Other notes @@ -324,22 +333,15 @@ Due to the current imperfection of Python's nanosecond support (see link below), 1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds 2. https://www.python.org/dev/peps/pep-0564/ - -## Frequently Asked Questions - -Welcome to [ask questions or report questions](https://github.com/taosdata/taos-connector-python/issues). - ## Important Update -| Connector version | Important Update | Release date | -| ---------- | --------------------------------------------------------------------------------- | ---------- | -| 2.3.1 | 1. support TDengine REST API
    2. remove support for Python version below 3.6 | 2022-04-28 | -| 2.2.5 | support timezone option when connect | 2022-04-13 | -| 2.2.2 | support sqlalchemy dialect plugin | 2022-03-28 | - [**Release Notes**] (https://github.com/taosdata/taos-connector-python/releases) ## API Reference - [taos](https://docs.taosdata.com/api/taospy/taos/) - [taosrest](https://docs.taosdata.com/api/taospy/taosrest) + +## Frequently Asked Questions + +Welcome to [ask questions or report questions](https://github.com/taosdata/taos-connector-python/issues). diff --git a/docs/en/14-reference/03-connector/08-node.mdx b/docs/en/14-reference/03-connector/08-node.mdx new file mode 100644 index 0000000000000000000000000000000000000000..f93632b4171a0b563a17f80533d3ede4c76e4425 --- /dev/null +++ b/docs/en/14-reference/03-connector/08-node.mdx @@ -0,0 +1,252 @@ +--- +toc_max_heading_level: 4 +sidebar_label: Node.js +title: TDengine Node.js Connector +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +import Preparition from "./_preparition.mdx"; +import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx"; +import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx"; +import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx"; +import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx"; +import NodeQuery from "../../07-develop/04-query-data/_js.mdx"; + +`@tdengine/client` and `@tdengine/rest` are the official Node.js connectors. Node.js developers can develop applications to access TDengine instance data. Note: The connectors for TDengine 3.0 are different than those for TDengine 2.x. The new connectors do not support TDengine 2.x. + +`@tdengine/client` is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. `@tdengine/rest` is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The REST connector can run on any platform, but performance is slightly degraded, and the interface implements a somewhat different set of functional features than the native interface. + +The source code for the Node.js connectors is located on [GitHub](https://github.com/taosdata/taos-connector-node/tree/3.0). + +## Supported platforms + +The platforms supported by the native connector are the same as those supported by the TDengine client driver. +The REST connector supports all platforms that can run Node.js. + +## Version support + +Please refer to [version support list](/reference/connector#version-support) + +## Supported features + +### Native connectors + +1. Connection Management +2. General Query +3. Continuous Query +4. Parameter Binding +5. Subscription +6. Schemaless + +### REST Connector + +1. Connection Management +2. General Query +3. Continuous Query + +## Installation Steps + +### Pre-installation preparation + +- Install the Node.js development environment +- If you are using the REST connector, skip this step. However, if you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector#Install-Client-Driver) for more details. We use [node-gyp](https://github.com/nodejs/node-gyp) to interact with TDengine instances and also need to install some dependencies mentioned below depending on the specific OS. + + + + +- `python` (recommended for `v2.7` , `v3.x.x` currently not supported) +- `@tdengine/client` 3.0.0 supports Node.js LTS v10.9.0 or later and Node.js LTS v12.8.0 or later. Older versions may be incompatible. +- `make` +- C compiler, [GCC](https://gcc.gnu.org) v4.8.5 or higher + + + + +- Installation method 1 + +Use Microsoft's [windows-build-tools](https://github.com/felixrieseberg/windows-build-tools) to execute `npm install --global --production` from the `cmd` command-line interface to install all the necessary tools. + +- Installation method 2 + +Manually install the following tools. + +- Install Visual Studio related: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) or [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) +- Install [Python](https://www.python.org/downloads/) 2.7 (`v3.x.x` is not supported) and execute `npm config set python python2.7`. +- Go to the `cmd` command-line interface, `npm config set msvs_version 2017` + +Refer to Microsoft's Node.js User Manual [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules). + +If using ARM64 Node.js on Windows 10 ARM, you must add "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64". + + + + +### Install via npm + + + + +```bash +npm install @tdengine/client +``` + + + + +```bash +npm install @tdengine/rest +``` + + + + +### Verify + +After installing the TDengine client, use the `nodejsChecker.js` program to verify that the current environment supports Node.js access to TDengine. + +Verification in details: + +- Create an installation test folder such as `~/tdengine-test`. Download the [nodejsChecker.js source code](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/nodejsChecker.js) to your local machine. + +- Execute the following command from the command-line. + +```bash +npm init -y +npm install @tdengine/client +node nodejsChecker.js host=localhost +``` + +- After executing the above steps, the command-line will output the result of `nodejsChecker.js` connecting to the TDengine instance and performing a simple insert and query. + +## Establishing a connection + +Please choose to use one of the connectors. + + + + +Install and import the `@tdengine/client` package. + +```javascript +//A cursor also needs to be initialized in order to interact with TDengine from Node.js. +const taos = require("@tdengine/client"); +var conn = taos.connect({ + host: "127.0.0.1", + user: "root", + password: "taosdata", + config: "/etc/taos", + port: 0, +}); +var cursor = conn.cursor(); // Initializing a new cursor + +//Close a connection +conn.close(); +``` + + + + +Install and import the `@tdengine/rest` package. + +```javascript +//A cursor also needs to be initialized in order to interact with TDengine from Node.js. +import { options, connect } from "@tdengine/rest"; +options.path = "/rest/sql"; +// set host +options.host = "localhost"; +// set other options like user/passwd + +let conn = connect(options); +let cursor = conn.cursor(); +``` + + + + +## Usage examples + +### Write data + +#### SQL Write + + + +#### InfluxDB line protocol write + + + +#### OpenTSDB Telnet line protocol write + + + +#### OpenTSDB JSON line protocol write + + + +### Querying data + + + + +## More sample programs + +| Sample Programs | Sample Program Description | +| --------------------------------------------------------------------------------------------------------------------------------- --------- | -------------------------------------- | +| [basicUse](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/queryExample.js) | Basic operations such as establishing connections and running SQl commands. | +| [stmtBindBatch](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/bindParamBatch.js) | Binding multi-line parameter insertion. | | +| [stmtBindSingleParamBatch](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/bindSingleParamBatch.js) | Columnar binding parameter insertion | +| [stmtQuery](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/stmtQuery.js) | Binding parameter query | +| [schemless insert](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/schemaless.js) | Schemaless insert | +| [TMQ](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/tmq.js) | Using data subscription | +| [asyncQuery](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/asyncQueryExample.js) | Using asynchronous queries | +| [REST](https://github.com/taosdata/taos-connector-node/blob/3.0/typescript-rest/example/example.ts) | Using TypeScript with the REST connector | + +## Usage limitations + +`@tdengine/client` 3.0.0 supports Node.js LTS v12.8.0 to 12.9.1 and 10.9.0 to 10.20.0. + + + + + +## Frequently Asked Questions + +1. Using REST connections requires starting taosadapter. + + ```bash + sudo systemctl start taosadapter + ``` + +2. Node.js versions + + `@tdengine/client` supports Node.js v10.9.0 to 10.20.0 and 12.8.0 to 12.9.1. + +3. "Unable to establish connection", "Unable to resolve FQDN" + + Usually, the root cause is an incorrect FQDN configuration. You can refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot. + +## Important update records + +### Native connectors + +| package name | version | TDengine version | Description | +|------------------|---------|---------------------|------------------------------------------------------------------| +| @tdengine/client | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. | +| td2.0-connector | 2.0.12 | 2.4.x;2.5.x;2.6.x | Fixed cursor.close() bug. | +| td2.0-connector | 2.0.11 | 2.4.x;2.5.x;2.6.x | Supports parameter binding, JSON tags and schemaless interface | +| td2.0-connector | 2.0.10 | 2.4.x;2.5.x;2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription | +### REST Connector + +| package name | version | TDengine version | Description | +|----------------------|---------|---------------------|---------------------------------------------------------------------------| +| @tdengine/rest | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. | +| td2.0-rest-connector | 1.0.7 | 2.4.x;2.5.x;2.6.x | Removed default port 6041。 | +| td2.0-rest-connector | 1.0.6 | 2.4.x;2.5.x;2.6.x | Fixed affectRows bug with create, insert, update, and alter. | +| td2.0-rest-connector | 1.0.5 | 2.4.x;2.5.x;2.6.x | Support cloud token | +| td2.0-rest-connector | 1.0.3 | 2.4.x;2.5.x;2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries | + +## API Reference + +[API Reference](https://docs.taosdata.com/api/td2.0-connector/) \ No newline at end of file diff --git a/docs/en/14-reference/03-connector/csharp.mdx b/docs/en/14-reference/03-connector/09-csharp.mdx similarity index 72% rename from docs/en/14-reference/03-connector/csharp.mdx rename to docs/en/14-reference/03-connector/09-csharp.mdx index 2d1b62fe89c542280c4264dd478538fa00634c79..823e9075993e0a4fd32abafbbd4e95f1cc7bc2cf 100644 --- a/docs/en/14-reference/03-connector/csharp.mdx +++ b/docs/en/14-reference/03-connector/09-csharp.mdx @@ -1,6 +1,5 @@ --- toc_max_heading_level: 4 -sidebar_position: 7 sidebar_label: C# title: C# Connector --- @@ -8,7 +7,7 @@ title: C# Connector import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -import Preparation from "./_preparation.mdx" +import Preparition from "./_preparition.mdx" import CSInsert from "../../07-develop/03-insert-data/_cs_sql.mdx" import CSInfluxLine from "../../07-develop/03-insert-data/_cs_line.mdx" import CSOpenTSDBTelnet from "../../07-develop/03-insert-data/_cs_opts_telnet.mdx" @@ -16,16 +15,17 @@ import CSOpenTSDBJson from "../../07-develop/03-insert-data/_cs_opts_json.mdx" import CSQuery from "../../07-develop/04-query-data/_cs.mdx" import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx" - `TDengine.Connector` is a C# language connector provided by TDengine that allows C# developers to develop C# applications that access TDengine cluster data. The `TDengine.Connector` connector supports connect to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc. The `TDengine.Connector` currently does not provide a REST connection interface. Developers can write their RESTful application by referring to the [REST API](/reference/rest-api/) documentation. This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to TDengine clusters via `TDengine.Connector` to perform basic operations such as data writing and querying. -The source code of `TDengine.Connector` is hosted on [GitHub](https://github.com/taosdata/taos-connector-dotnet). +Note: TDengine Connector 3.x is not compatible with TDengine 2.x. In an environment with TDengine 2.x, you must use TDengine.Connector 1.x for the C# connector. + +The source code of `TDengine.Connector` is hosted on [GitHub](https://github.com/taosdata/taos-connector-dotnet/tree/3.0). -## Supported Platforms +## Supported platforms The supported platforms are the same as those supported by the TDengine client driver. @@ -57,29 +57,29 @@ Please refer to [version support list](/reference/connector#version-support) You can reference the `TDengine.Connector` published in Nuget to the current project via the `dotnet` command under the path of the existing .NET project. -``` +``` bash dotnet add package TDengine.Connector ``` -You can download TDengine's source code and directly reference the latest version of the TDengine.Connector library +You can [download the source code](https://github.com/taosdata/taos-connector-dotnet/tree/3.0) and directly reference the latest version of the TDengine.Connector library. -``` -git clone https://github.com/taosdata/TDengine.git -cd TDengine/src/connector/C#/src/ -cp -r TDengineDriver/ myProject +```bash +git clone -b 3.0 https://github.com/taosdata/taos-connector-dotnet.git +cd taos-connector-dotnet +cp -r src/ myProject cd myProject -dotnet add TDengineDriver/TDengineDriver.csproj +dotnet add exmaple.csproj reference src/TDengine.csproj ``` -## Create a connection +## Establish a Connection -```csharp +``` C# using TDengineDriver; namespace TDengineExample @@ -146,25 +146,24 @@ namespace TDengineExample |Sample program |Sample program description | |--------------------------------------------------------------------------------------------------------------------|------------ --------------------------------| -| [C#checker](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/C%23checker) | Using TDengine.Connector, you can test C# Driver's synchronous writes and queries | -| [TDengineTest](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/TDengineTest) | A simple example of writing and querying using TDengine. -| [insertCn](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/insertCn) | Example of writing and querying Chinese characters using TDengine. -| [jsonTag](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/jsonTag) | Example of writing and querying JSON tag type data using TDengine. -| [stmt](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/stmt) | Example of parameter binding using TDengine. -| [schemaless](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/schemaless) | Example of writing with schemaless implemented using TDengine. |schemaless -| [benchmark](https://github.com/taosdata/TDengine/tree/develop/examples/C%23/taosdemo) | A simple benchmark implemented using TDengine. -| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/develop/examples/QueryAsyncSample.cs) | Example of an asynchronous query implemented using TDengine. Example of an asynchronous query -| [subscribe](https://github.com/taosdata/taos-connector-dotnet/blob/develop/examples/SubscribeSample.cs) | Example of subscribing to data using TDengine. Data example +| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector | +| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector | +| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | Parameter binding with TDengine Connector | +| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | Schemaless writes with TDengine Connector | +| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector | +| [TMQ](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | Data subscription with TDengine Connector | ## Important update records | TDengine.Connector | Description | |--------------------|--------------------------------| +| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. | +| 1.0.7 | Fixed TDengine.Query() memory leak. | | 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. | -| 1.0.5 | Fix Windows sync query Chinese error bug. | 1.0.4 | Fix schemaless bug. -| 1.0.4 | Add asynchronous query, subscription, and other functions. Fix the binding parameter bug. -| 1.0.3 | Add parameter binding, schemaless, JSON tag, etc. | new -| 1.0.2 | Add connection management, synchronous query, error messages, etc. ## Other +| 1.0.5 | Fix Windows sync query Chinese error bug. | 1.0.4 | Fix schemaless bug. | +| 1.0.4 | Add asynchronous query, subscription, and other functions. Fix the binding parameter bug. | +| 1.0.3 | Add parameter binding, schemaless, JSON tag, etc. | +| 1.0.2 | Add connection management, synchronous query, error messages, etc. | ## Other descriptions @@ -173,13 +172,12 @@ namespace TDengineExample `Taos` is an ADO.NET connector for TDengine, supporting Linux and Windows platforms. Community contributor `Maikebing@@maikebing contributes the connector`. Please refer to: * Interface download: -* Usage notes: ## Frequently Asked Questions 1. "Unable to establish connection", "Unable to resolve FQDN" - Usually, it's caused by an incorrect FQDN configuration. Please refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot. + Usually, it's caused by an incorrect FQDN configuration. Please refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot. 2. Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: The specified module cannot be found. diff --git a/docs/en/14-reference/03-connector/php.mdx b/docs/en/14-reference/03-connector/10-php.mdx similarity index 98% rename from docs/en/14-reference/03-connector/php.mdx rename to docs/en/14-reference/03-connector/10-php.mdx index 69dcce91e80fa05face1ffb35effe1ce1efa2631..820f70375982eb54cdd87602b891e5f04756c0e5 100644 --- a/docs/en/14-reference/03-connector/php.mdx +++ b/docs/en/14-reference/03-connector/10-php.mdx @@ -1,5 +1,4 @@ --- -sidebar_position: 1 sidebar_label: PHP title: PHP Connector --- @@ -61,7 +60,7 @@ phpize && ./configure && make -j && make install **Specify TDengine location:** ```shell -phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install +phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install ``` > `--with-tdengine-dir=` is followed by TDengine location. diff --git a/docs/en/14-reference/03-connector/_linux_install.mdx b/docs/en/14-reference/03-connector/_linux_install.mdx index 3fa123794cd8ff304a1bc13449591194e7320aa9..07f8fb5c7118d84c53017f44d9811a3357944cfc 100644 --- a/docs/en/14-reference/03-connector/_linux_install.mdx +++ b/docs/en/14-reference/03-connector/_linux_install.mdx @@ -1,16 +1,15 @@ -import PkgList from "/components/PkgList"; +import PkgListV3 from "/components/PkgListV3"; -1. Download the TDengine client installation package +1. Download the client installation package - + - [All Packages](https://www.taosdata.com/en/all-downloads/) + [All Downloads](../../releases) 2. Unzip Download the package to any directory the current user has read/write permission. Then execute `tar -xzvf TDengine-client-VERSION.tar.gz` command. The VERSION should be the version of the package you just downloaded. - 3. Execute the install script Once the package is unzipped, you will see the following files in the directory: @@ -18,17 +17,14 @@ import PkgList from "/components/PkgList"; - _ taos.tar.gz_: client driver package - _ driver_: TDengine client driver - _examples_: some example programs of different programming languages (C/C#/go/JDBC/MATLAB/python/R) - You can run `install_client.sh` to install it. - -4. Edit taos.cfg +4. configure taos.cfg Edit `taos.cfg` file (full path is `/etc/taos/taos.cfg` by default), modify `firstEP` with actual TDengine server's End Point, for example `h1.tdengine.com:6030` :::tip 1. If the computer does not run the TDengine service but installs the TDengine client driver, then you need to config `firstEP` in `taos.cfg` only, and there is no need to configure `FQDN`; - 2. If you encounter the "Unable to resolve FQDN" error, please make sure the FQDN in the `/etc/hosts` file of the current computer is correctly configured, or the DNS service is correctly configured. ::: diff --git a/docs/en/14-reference/03-connector/_preparation.mdx b/docs/en/14-reference/03-connector/_preparation.mdx index 07ebdbca3d891ff51a254bc1b83016f1404bb47e..c6e42ce02348595da0fdd75847d6442c285dc10a 100644 --- a/docs/en/14-reference/03-connector/_preparation.mdx +++ b/docs/en/14-reference/03-connector/_preparation.mdx @@ -2,7 +2,7 @@ :::info -Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding [Windows client](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client) for TDengine. +Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding Windows client, please refer to [Install TDengine](../../get-started/package). - libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately. - taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately. diff --git a/docs/zh/14-reference/03-connector/_preparition.mdx b/docs/en/14-reference/03-connector/_preparition.mdx similarity index 100% rename from docs/zh/14-reference/03-connector/_preparition.mdx rename to docs/en/14-reference/03-connector/_preparition.mdx diff --git a/docs/en/14-reference/03-connector/_verify_linux.mdx b/docs/en/14-reference/03-connector/_verify_linux.mdx index 875c9e132b7acbbe95d394ae6cee6f2bd95ddbe0..3438b1578eaa1df38115d80fd67f491c071d619a 100644 --- a/docs/en/14-reference/03-connector/_verify_linux.mdx +++ b/docs/en/14-reference/03-connector/_verify_linux.mdx @@ -2,10 +2,6 @@ Execute TDengine CLI program `taos` directly from the Linux shell to connect to ```text $ taos -Welcome to the TDengine shell from Linux, Client Version:3.0.0.0 -Copyright (c) 2022 by TAOS Data, Inc. All rights reserved. - -Server is Community Edition. taos> show databases; name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size | wal_roll_period | wal_seg_size | diff --git a/docs/en/14-reference/03-connector/_verify_windows.mdx b/docs/en/14-reference/03-connector/_verify_windows.mdx index 4813bd24c3b4a2a4bf04af3c397bdadd22e9e399..402b1705110295f4d465c9ae7c734eb3e0ad0b5f 100644 --- a/docs/en/14-reference/03-connector/_verify_windows.mdx +++ b/docs/en/14-reference/03-connector/_verify_windows.mdx @@ -1,11 +1,6 @@ Go to the `C:\TDengine` directory from `cmd` and execute TDengine CLI program `taos.exe` directly to connect to the TDengine service and enter the TDengine CLI interface, for example, as follows: ```text -Welcome to the TDengine shell from Windows, Client Version:3.0.0.0 -Copyright (c) 2022 by TAOS Data, Inc. All rights reserved. - -Server is Community Edition. - taos> show databases; name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size | wal_roll_period | wal_seg_size | ========================================================================================================================================================================================================================================================================================================================================================================================================================================================================= diff --git a/docs/en/14-reference/03-connector/_windows_install.mdx b/docs/en/14-reference/03-connector/_windows_install.mdx index 2819be615ee0a80da9f0324d8d41e9b247e8a7f6..ea638ed1ed6c64c3ec4ceaea436f65dd1f09a27e 100644 --- a/docs/en/14-reference/03-connector/_windows_install.mdx +++ b/docs/en/14-reference/03-connector/_windows_install.mdx @@ -1,11 +1,10 @@ -import PkgList from "/components/PkgList"; +import PkgListV3 from "/components/PkgListV3"; 1. Download the client installation package - - - [All downloads](https://www.taosdata.com/cn/all-downloads/) + + [All Downloads](../../releases) 2. Execute the installer, select the default value as prompted, and complete the installation 3. Installation path diff --git a/docs/en/14-reference/03-connector/03-connector.mdx b/docs/en/14-reference/03-connector/index.mdx similarity index 75% rename from docs/en/14-reference/03-connector/03-connector.mdx rename to docs/en/14-reference/03-connector/index.mdx index c3f4530023db19e807565573bd10d41dafcd6f8e..49e2dceec531cf8449749ea9dbb111079771a788 100644 --- a/docs/en/14-reference/03-connector/03-connector.mdx +++ b/docs/en/14-reference/03-connector/index.mdx @@ -8,13 +8,13 @@ TDengine provides a rich set of APIs (application development interface). To fac ## Supported platforms -Currently, TDengine's native interface connectors can support platforms such as X64/ARM64 hardware platforms and Linux/Win64 development environments. The comparison matrix is as follows. +Currently, TDengine's native interface connectors can support platforms such as x64 and ARM hardware platforms and Linux and Windows development environments. The comparison matrix is as follows. -| **CPU** | **OS** | **JDBC** | **Python** | **Go** | **Node.js** | **C#** | **Rust** | C/C++ | -| ------- | ------ | -------- | ---------- | ------ | ----------- | ------ | -------- | ----- | -| **X86 64bit** | **Linux** | ● | ● | ● | ● | ● | ● | ● | -| **X86 64bit** | **Win64** | ● | ● | ● | ● | ● | ● | ● | -| **ARM64** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● | +| **CPU** | **OS** | **Java** | **Python** | **Go** | **Node.js** | **C#** | **Rust** | C/C++ | +| -------------- | --------- | -------- | ---------- | ------ | ----------- | ------ | -------- | ----- | +| **X86 64bit** | **Linux** | ● | ● | ● | ● | ● | ● | ● | +| **X86 64bit** | **Win64** | ● | ● | ● | ● | ● | ● | ● | +| **ARM64** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● | Where ● means the official test verification passed, ○ means the unofficial test verification passed, -- means no assurance. @@ -26,6 +26,7 @@ TDengine version updates often add new features, and the connector versions in t | **TDengine Versions** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** | | --------------------- | -------- | ---------- | ------------ | ------------- | --------------- | -------- | +| **3.0.0.0 and later** | 3.0.0 | current version | 3.0 branch | 3.0.0 | 3.0.0 | current version | | **2.4.0.14 and up** | 2.0.38 | current version | develop branch | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | current version | | **2.4.0.6 and up** | 2.0.37 | current version | develop branch | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | current version | | **2.4.0.4 - 2.4.0.5** | 2.0.37 | current version | develop branch | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | current version | @@ -42,9 +43,8 @@ Comparing the connector support for TDengine functional features as follows. | -------------- | -------- | ---------- | ------ | ------ | ----------- | -------- | | **Connection Management** | Support | Support | Support | Support | Support | Support | | **Regular Query** | Support | Support | Support | Support | Support | Support | -| **Continuous Query** | Support | Support | Support | Support | Support | Support | | **Parameter Binding** | Support | Support | Support | Support | Support | Support | -| **Subscription** | Support | Support | Support | Support | Support | Not Supported | +| ** TMQ ** | Support | Support | Support | Support | Support | Support | | **Schemaless** | Support | Support | Support | Support | Support | Support | | **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported | @@ -52,17 +52,17 @@ Comparing the connector support for TDengine functional features as follows. The different database framework specifications for various programming languages do not mean that all C/C++ interfaces need a wrapper. ::: -### Using the REST interface +### Use HTTP Interfaces (REST or WebSocket) | **Functional Features** | **Java** | **Python** | **Go** | **C# (not supported yet)** | **Node.js** | **Rust** | | ------------------------------ | -------- | ---------- | -------- | ------------------ | ----------- | -------- | | **Connection Management** | Support | Support | Support | N/A | Support | Support | | **Regular Query** | Support | Support | Support | N/A | Support | Support | | **Continous Query ** | Support | Support | Support | N/A | Support | Support | -| **Parameter Binding** | Not Supported | Not Supported | Not Supported | N/A | Not Supported | Not Supported | -| **Subscription** | Not Supported | Not Supported | Not Supported | N/A | Not Supported | Not Supported | -| **Schemaless** | Not supported | Not Supported | Not Supported | N/A | Not Supported | Not supported | -| **Bulk Pulling (based on WebSocket) **| Support | Not Supported | Not Supported | N/A | Not Supported | Not Supported | +| **Parameter Binding** | Not supported | Not supported | Not supported | N/A | Not supported | Support | +| ** TMQ ** | Not supported | Not supported | Not supported | N/A | Not supported | Support | +| **Schemaless** | Not supported | Not supported | Not supported | N/A | Not supported | Not supported | +| **Bulk Pulling (based on WebSocket) **| Support | Support | Not Supported | N/A | Not Supported | Supported | | **DataFrame** | Not supported | Support | Not supported | N/A | Not supported | Not supported | :::warning @@ -85,7 +85,7 @@ The client driver needs to be installed if you use the native interface connecto ::: -### Installation steps +### Install @@ -96,7 +96,7 @@ The client driver needs to be installed if you use the native interface connecto -### Installation Verification +### Verify After completing the above installation and configuration and you have confirmed that the TDengine service is up and running, you can execute the TDengine CLI tool to log in. diff --git a/docs/en/14-reference/03-connector/node.mdx b/docs/en/14-reference/03-connector/node.mdx deleted file mode 100644 index 8f586acde4848af71efcb23358be1f8486cedb8e..0000000000000000000000000000000000000000 --- a/docs/en/14-reference/03-connector/node.mdx +++ /dev/null @@ -1,248 +0,0 @@ ---- -toc_max_heading_level: 4 -sidebar_position: 6 -sidebar_label: Node.js -title: TDengine Node.js Connector ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; - -import Preparation from "./_preparation.mdx"; -import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx"; -import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx"; -import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx"; -import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx"; -import NodeQuery from "../../07-develop/04-query-data/_js.mdx"; - -`td2.0-connector` and `td2.0-rest-connector` are the official Node.js language connectors for TDengine. Node.js developers can develop applications to access TDengine instance data. - -`td2.0-connector` is a **native connector** that connects to TDengine instances via the TDengine client driver (taosc) and supports data writing, querying, subscriptions, schemaless writing, and bind interface. The `td2.0-rest-connector` is a **REST connector** that connects to TDengine instances via the REST interface provided by taosAdapter. The REST connector can run on any platform, but performance is slightly degraded, and the interface implements a somewhat different set of functional features than the native interface. - -The Node.js connector source code is hosted on [GitHub](https://github.com/taosdata/taos-connector-node). - -## Supported Platforms - -The platforms supported by the native connector are the same as those supported by the TDengine client driver. -The REST connector supports all platforms that can run Node.js. - -## Version support - -Please refer to [version support list](/reference/connector#version-support) - -## Supported features - -### Native connectors - -1. connection management -2. general query -3. continuous query -4. parameter binding -5. subscription function -6. Schemaless - -### REST Connector - -1. connection management -2. general queries -3. Continuous query - -## Installation steps - -### Pre-installation - -- Install the Node.js development environment -- If you are using the REST connector, skip this step. However, if you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector#Install-Client-Driver) for more details. We use [node-gyp](https://github.com/nodejs/node-gyp) to interact with TDengine instances and also need to install some dependencies mentioned below depending on the specific OS. - - - - -- `python` (recommended for `v2.7` , `v3.x.x` currently not supported) -- `td2.0-connector` 2.0.6 supports Node.js LTS v10.9.0 or later, Node.js LTS v12.8.0 or later; 2.0.5 and earlier support Node.js LTS v10.x versions. Other versions may have package compatibility issues -- `make` -- C compiler, [GCC](https://gcc.gnu.org) v4.8.5 or higher - - - - -- Installation method 1 - -Use Microsoft's [windows-build-tools](https://github.com/felixrieseberg/windows-build-tools) to execute `npm install --global --production` from the `cmd` command-line interface to install all the necessary tools. - -- Installation method 2 - -Manually install the following tools. - -- Install Visual Studio related: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) or [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) -- Install [Python](https://www.python.org/downloads/) 2.7 (`v3.x.x` is not supported) and execute `npm config set python python2.7`. -- Go to the `cmd` command-line interface, `npm config set msvs_version 2017` - -Refer to Microsoft's Node.js User Manual [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules). - -If using ARM64 Node.js on Windows 10 ARM, you must add "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64". - - - - -### Install via npm - - - - -```bash -npm install td2.0-connector -``` - - - - -```bash -npm i td2.0-rest-connector -``` - - - - -### Installation verification - -After installing the TDengine client, use the `nodejsChecker.js` program to verify that the current environment supports Node.js access to TDengine. - -Verification in details: - -- Create a new installation verification directory, e.g. `~/tdengine-test`, and download the [nodejsChecker.js source code](https://github.com/taosdata/TDengine/tree/develop/examples/nodejs/) from GitHub. to the work directory. - -- Execute the following command from the command-line. - -```bash -npm init -y -npm install td2.0-connector -node nodejsChecker.js host=localhost -``` - -- After executing the above steps, the command-line will output the result of `nodejsChecker.js` connecting to the TDengine instance and performing a simple insert and query. - -## Establishing a connection - -Please choose to use one of the connectors. - - - - -Install and refer to `td2.0-connector` package: - -```javascript -//A cursor also needs to be initialized in order to interact with TDengine from Node.js. -const taos = require("td2.0-connector"); -var conn = taos.connect({ - host: "127.0.0.1", - user: "root", - password: "taosdata", - config: "/etc/taos", - port: 0, -}); -var cursor = conn.cursor(); // Initializing a new cursor - -//Close a connection -conn.close(); -``` - - - - -Install and refer to `td2.0-rest-connector`package: - -```javascript -//A cursor also needs to be initialized in order to interact with TDengine from Node.js. -import { options, connect } from "td2.0-rest-connector"; -options.path = "/rest/sqlt"; -// set host -options.host = "localhost"; -// set other options like user/passwd - -let conn = connect(options); -let cursor = conn.cursor(); -``` - - - - -## Usage examples - -### Write data - -#### SQL Writing - - - -#### InfluxDB line protocol writing - - - -#### OpenTSDB Telnet line protocol writing - - - -#### OpenTSDB JSON line protocol writing - - - -### Query data - - - -## More Sample Programs - -| Sample Programs | Sample Program Description | -| --------------------------------------------------------------------------------------------------------------------------------- --------- | -------------------------------------- | -| [connection](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/cursorClose.js) | Example of establishing a connection. | -| [stmtBindBatch](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtBindParamBatchSample.js) | Example of binding a multi-line parameter Example of inserting. | -| [stmtBind](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtBindParamSample.js) | Example of a line-by-line bind parameter insertion. | -| [stmtBindSingleParamBatch](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/) stmtBindSingleParamBatchSample.js) | Example of binding parameters by column. | -| [stmtUseResult](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtUseResultSample.js) | Example of a bound parameter query. | -| [json tag](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testJsonTag.js) | Example of using Json tag. | -| [Nanosecond](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testNanoseconds.js) | An example of using timestamps with nanosecond precision. | -| [Microsecond](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testMicroseconds.js) | Example of using microsecond timestamp. | -| [schemless insert](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testSchemalessInsert.js) | schemless Example of a schemless insert. | -| [subscribe](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testSubscribe.js) | Example of using subscribe. | -| [asyncQuery](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/tset.js) | An example of using asynchronous queries. | -| [REST](https://github.com/taosdata/taos-connector-node/blob/develop/typescript-rest/example/example.ts) | An example of using TypeScript with REST connections. | - -## Usage restrictions - -Node.js Connector >= v2.0.6 currently supports node versions >=v12.8.0 <= v12.9.1 || >=v10.20.0 <= v10.9.0; v10.x versions are supported in 2.0.5 and earlier, other versions may have package compatibility issues. - -## Other notes - -See [video tutorial](https://www.taosdata.com/blog/2020/11/11/1957.html) for the Node.js connector usage. - -## Frequently Asked Questions - -1. Using REST connections requires starting taosadapter. - - ```bash - sudo systemctl start taosadapter - ``` - -2. "Unable to establish connection", "Unable to resolve FQDN" - - Usually, the root cause is an incorrect FQDN configuration. You can refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot. - -## Important Updates - -### Native connectors - -| td2.0-connector version | description | -| -------------------- | ---------------------------------------------------------------- | -| 2.0.12 | Fix bug with cursor.close() error. | 2.0.12 | Fix bug with cursor.close() error. -| 2.0.11 | Support for binding parameters, json tag, schemaless interface, etc. | -| 2.0.10 | Support connection management, general query, continuous query, get system information, subscribe function, etc. | ### REST Connector - -### REST Connector - -| td2.0-rest-connector version | Description | -| ------------------------- | ---------------------------------------------------------------- | -| 1.0.3 | Support connection management, general query, get system information, error message, continuous query, etc. |# API Reference - -## API Reference - -[API Reference](https://docs.taosdata.com/api/td2.0-connector/) diff --git a/docs/en/14-reference/04-taosadapter.md b/docs/en/14-reference/04-taosadapter.md index cad229c32d602e8fc595ec06f72a1a486e2af77b..e7ea620d0bed3aee3ff0acf8063120acca33c335 100644 --- a/docs/en/14-reference/04-taosadapter.md +++ b/docs/en/14-reference/04-taosadapter.md @@ -30,7 +30,7 @@ taosAdapter provides the following features. ### Install taosAdapter -taosAdapter has been part of TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TDengine official website](https://tdengine.com/all-downloads/) to download the TDengine server installation package (taosAdapter is included in v2.4.0.0 and later version). If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation. +If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TDengine 3.0 released versions](../../releases) to download the TDengine server installation package. If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation. ### Start/Stop taosAdapter @@ -69,20 +69,23 @@ Usage of taosAdapter: --debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" --help Print this help message and exit --influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true) + --log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL" --log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos") --log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30) --log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB") --log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s) + --log.sqlRotationCount uint record sql log rotation count. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_COUNT" (default 2) + --log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB") + --log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s) --logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info") --monitor.collectDuration duration Set monitor duration. Env "TAOS_MONITOR_COLLECT_DURATION" (default 3s) --monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_MONITOR_IDENTITY" --monitor.incgroup Whether running in cgroup. Env "TAOS_MONITOR_INCGROUP" - --monitor.password string TDengine password. Env "TAOS_MONITOR_PASSWORD" (default "taosdata") - --monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80) + --monitor.password string TDengine password. Env "TAOS_MONITOR_PASSWORD" (default "taosdata") --monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80) --monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70) --monitor.user string TDengine user. Env "TAOS_MONITOR_USER" (default "root") --monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_MONITOR_WRITE_INTERVAL" (default 30s) - --monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_MONITOR_WRITE_TO_TD" (default true) + --monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_MONITOR_WRITE_TO_TD" --node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE" --node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE" --node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter") @@ -98,8 +101,10 @@ Usage of taosAdapter: --node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100]) --node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root") --opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true) + --opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1) --opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb]) --opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE" + --opentsdb_telnet.flushInterval duration opentsdb_telnet flush interval (0s means not valid) . Env "TAOS_ADAPTER_OPENTSDB_TELNET_FLUSH_INTERVAL" --opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250) --opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata") --opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049]) @@ -111,9 +116,6 @@ Usage of taosAdapter: -P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041) --prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true) --restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1) - --ssl.certFile string ssl cert file path. Env "TAOS_ADAPTER_SSL_CERT_FILE" - --ssl.enable enable ssl. Env "TAOS_ADAPTER_SSL_ENABLE" - --ssl.keyFile string ssl key file path. Env "TAOS_ADAPTER_SSL_KEY_FILE" --statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000) --statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd") --statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true) @@ -149,11 +151,12 @@ You do not need to care about these configurations if you do not make interface For details on the CORS protocol, please refer to: [https://www.w3.org/wiki/CORS_Enabled](https://www.w3.org/wiki/CORS_Enabled) or [https://developer.mozilla.org/zh-CN/docs/Web/HTTP/CORS](https://developer.mozilla.org/zh-CN/docs/Web/HTTP/CORS). -See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/blob/develop/example/config/taosadapter.toml) for sample configuration files. +See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/blob/3.0/example/config/taosadapter.toml) for sample configuration files. ## Feature List -- Compatible with RESTful interfaces [REST API](/reference/rest-api/) +- RESTful interface + [https://docs.tdengine.com/reference/rest-api/](https://docs.tdengine.com/reference/rest-api/) - Compatible with InfluxDB v1 write interface [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/) - Compatible with OpenTSDB JSON and telnet format writes @@ -176,13 +179,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl ### TDengine RESTful interface -You can use any client that supports the http protocol to write data to or query data from TDengine by accessing the REST interface address `http://:6041/`. See the [official documentation](/reference/connector#restful) for details. The following EndPoint is supported. - -```text -/rest/sql -/rest/sqlt -/rest/sqlutc -``` +You can use any client that supports the http protocol to write data to or query data from TDengine by accessing the REST interface address `http://:6041/rest/sql`. See the [official documentation](/reference/rest-api/) for details. ### InfluxDB @@ -203,7 +200,7 @@ Note: InfluxDB token authorization is not supported at present. Only Basic autho ### OpenTSDB -You can use any client that supports the http protocol to access the RESTful interface address `http://:6041/` to write data in OpenTSDB compatible format to TDengine. +You can use any client that supports the http protocol to access the RESTful interface address `http://:6041/` to write data in OpenTSDB compatible format to TDengine. The EndPoint is as follows: ```text /opentsdb/v1/put/json/ @@ -254,7 +251,7 @@ HTTP response content. Stops processing all write and query requests when the `pauseAllMemoryThreshold` threshold is exceeded. -HTTP response: code 503 +HTTP response content. - code 503 - body "memory exceeds threshold" @@ -269,7 +266,7 @@ Status check interface `http://:6041/-/ping` Corresponding configuration parameter -``text +```text monitor.collectDuration monitoring interval environment variable `TAOS_MONITOR_COLLECT_DURATION` (default value 3s) monitor.incgroup whether to run in cgroup (set to true for running in container) environment variable `TAOS_MONITOR_INCGROUP` monitor.pauseAllMemoryThreshold memory threshold for no more inserts and queries environment variable `TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD` (default 80) @@ -297,11 +294,11 @@ taosAdapter supports writing the metrics of HTTP monitoring, CPU percentage, and For configuration parameters | **Configuration items** | **Description** | **Default values** | -| ----------------------- | --------------------------------------------------------- | ---------- | +|-------------------------|--------------------------------------------|----------| | monitor.collectDuration | CPU and memory collection interval | 3s | | monitor.identity | The current taosadapter identifier will be used if not set to `hostname:port` | | | monitor.incgroup | whether it is running in a cgroup (set to true for running in a container) | false | -| monitor.writeToTD | Whether to write to TDengine | true | +| monitor.writeToTD | Whether to write to TDengine | false | | monitor.user | TDengine connection username | root | | monitor.password | TDengine connection password | taosdata | | monitor.writeInterval | Write to TDengine interval | 30s | @@ -313,9 +310,7 @@ taosAdapter controls the number of results returned by the parameter `restfulRow This parameter controls the number of results returned by the following interfaces: - `http://:6041/rest/sql` -- `http://:6041/rest/sqlt` -- `http://:6041/rest/sqlutc` -- ` http://:6041/prometheus/v1/remote_read/:db` +- `http://:6041/prometheus/v1/remote_read/:db` ## Troubleshooting @@ -328,7 +323,7 @@ You can also adjust the level of the taosAdapter log output by setting the `--lo In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) contains an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone software managed using `systemd` and has its own process ID. There are some configuration parameters and behaviors that are different between the two. See the following table for details. | **#** | **embedded httpd** | **taosAdapter** | **comment** | -| ----- | ------------------- | ------------------------------------ | ------------------------------------------------------------------ ------------------------------------------------------------------------ | +|-------|---------------------|-------------------------------|------------------------------------------------------------------------------------------------| | 1 | httpEnableRecordSql | --logLevel=debug | | | 2 | httpMaxThreads | n/a | taosAdapter Automatically manages thread pools without this parameter | | 3 | telegrafUseFieldNum | See the taosAdapter telegraf configuration method | | diff --git a/docs/en/14-reference/05-taosbenchmark.md b/docs/en/14-reference/05-taosbenchmark.md index 5d47cc06e8034d8c49669d71c3f98c1f587acb33..8f63dddfb711a98840cb423d1a4b0c1556a7b5fd 100644 --- a/docs/en/14-reference/05-taosbenchmark.md +++ b/docs/en/14-reference/05-taosbenchmark.md @@ -23,11 +23,7 @@ There are two ways to install taosBenchmark: TaosBenchmark needs to be executed on the terminal of the operating system, it supports two configuration methods: [Command-line arguments](#command-line-arguments-in-detail) and [JSON configuration file](#configuration-file-parameters-in-detail). These two methods are mutually exclusive. Users can use `-f ` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, but not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters. -<<<<<<< HEAD -taosBenchmark supports complete performance testing of TDengine. taosBenchmark supports the TDengine functions in three categories: write, query, and subscribe. These three functions are mutually exclusive, and users can select only one of them each time taosBenchmark runs. It is important to note that the type of functionality to be tested is not configurable when using the command-line configuration method, which can only test writing performance. To test the query and subscription performance of the TDengine, you must use the configuration file method and specify the function type to test via the parameter `filetype` in the configuration file. -======= -taosBenchmark supports the complete performance testing of TDengine by providing functionally to write, query, and subscribe. These three functions are mutually exclusive, users can only select one of them each time taosBenchmark runs. The query and subscribe functionalities are only configurable using a json configuration file by specifying the parameter `filetype`, while write can be performed through both the command-line and a configuration file. ->>>>>>> 108548b4d6 (docs: typo) +taosBenchmark supports the complete performance testing of TDengine by providing functionally to write, query, and subscribe. These three functions are mutually exclusive, users can only select one of them each time taosBenchmark runs. The query and subscribe functionalities are only configurable using a json configuration file by specifying the parameter `filetype`, while write can be performed through both the command-line and a configuration file. If you want to test the performance of queries or data subscriptionm configure taosBenchmark with the configuration file. You can modify the value of the `filetype` parameter to specify the function that you want to test. **Make sure that the TDengine cluster is running correctly before running taosBenchmark. ** @@ -61,8 +57,9 @@ Use the following command-line to run taosBenchmark and control its behavior via taosBenchmark -f ``` +**Sample configuration files** + #### Configuration file examples -##### Example of inserting a scenario JSON configuration file
    insert.json @@ -73,7 +70,7 @@ taosBenchmark -f
    -##### Query Scenario JSON Profile Example +#### Query Scenario JSON Profile Example
    query.json @@ -84,7 +81,7 @@ taosBenchmark -f
    -##### Subscription JSON configuration example +#### Subscription JSON configuration example
    subscribe.json @@ -128,7 +125,7 @@ taosBenchmark -f Enables interleaved insertion mode and specifies the number of rows of data to be inserted into each child table. Interleaved insertion mode means inserting the number of rows specified by this parameter into each sub-table and repeating the process until all sub-tables have been inserted. The default value is 0, i.e., data is inserted into one sub-table before the next sub-table is inserted. - **-i/--insert-interval ** : - Specify the insert interval in `ms` for interleaved insert mode. The default value is 0. It only works if `-B/--interlace-rows` is greater than 0. That means that after inserting interlaced rows for each child table, the data insertion with multiple threads will wait for the interval specified by this value before proceeding to the next round of writes. + Specify the insert interval in `ms` for interleaved insert mode. The default value is 0. It only works if `-B/--interlace-rows` is greater than 0. After inserting interlaced rows for each child table, the data insertion thread will wait for the interval specified by this value before proceeding to the next round of writes. - **-r/--rec-per-req ** : Writing the number of rows of records per request to TDengine, the default value is 30000. @@ -184,7 +181,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) This parameter indicates writing data with random values. The default is false. If users use this parameter, taosBenchmark will generate the random values. For tag/data columns of numeric type, the value is a random value within the range of values of that type. For NCHAR and BINARY type tag columns/data columns, the value is the random string within the specified length range. - **-x/--aggr-func** : - Switch parameter to indicate query aggregation function after insertion. The default value is false. + Switch parameter to indicate query aggregation function after insertion. The default is false. - **-y/--answer-yes** : Switch parameter that requires the user to confirm at the prompt to continue. The default value is false. @@ -230,45 +227,34 @@ The parameters listed in this section apply to all function modes. #### Database related configuration parameters -The parameters related to database creation are configured in `dbinfo` in the json configuration file, as follows. These parameters correspond to the database parameters specified when `create database` in TDengine. +The parameters related to database creation are configured in `dbinfo` in the json configuration file, as follows. The other parameters correspond to the database parameters specified when `create database` in [../../taos-sql/database]. - **name**: specify the name of the database. - **drop**: indicate whether to delete the database before inserting. The default is true. -- **replica**: specify the number of replicas when creating the database. - -- **days**: specify the time span for storing data in a single data file. The default is 10. - -- **cache**: specify the size of the cache blocks in MB. The default value is 16. - -- **blocks**: specify the number of cache blocks in each vnode. The default is 6. - -- **precision**: specify the database time precision. The default value is "ms". - -- **keep**: specify the number of days to keep the data. The default value is 3650. +#### Stream processing related configuration parameters -- **minRows**: specify the minimum number of records in the file block. The default value is 100. +The parameters for creating streams are configured in `stream` in the json configuration file, as shown below. -- **maxRows**: specify the maximum number of records in the file block. The default value is 4096. +- **stream_name**: Name of the stream. Mandatory. -- **comp**: specify the file compression level. The default value is 2. +- **stream_stb**: Name of the supertable for the stream. Mandatory. -- **walLevel** : specify WAL level, default is 1. +- **stream_sql**: SQL statement for the stream to process. Mandatory. -- **cacheLast**: indicate whether to allow the last record of each table to be kept in memory. The default value is 0. The value can be 0, 1, 2, or 3. +- **trigger_mode**: Triggering mode for stream processing. Optional. -- **quorum**: specify the number of writing acknowledgments in multi-replica mode. The default value is 1. +- **watermark**: Watermark for stream processing. Optional. -- **fsync**: specify the interval of fsync in ms when users set WAL to 2. The default value is 3000. - -- **update** : indicate whether to support data update, default value is 0, optional values are 0, 1, 2. +- **drop**: Whether to create the stream. Specify yes to create the stream or no to not create the stream. #### Super table related configuration parameters The parameters for creating super tables are configured in `super_tables` in the json configuration file, as shown below. - **name**: Super table name, mandatory, no default value. + - **child_table_exists** : whether the child table already exists, default value is "no", optional value is "yes" or "no". - **child_table_count** : The number of child tables, the default value is 10. @@ -319,6 +305,22 @@ The parameters for creating super tables are configured in `super_tables` in the - **tags_file** : only works when insert_mode is taosc, rest. The final tag value is related to the childtable_count. Suppose the tag data rows in the CSV file are smaller than the given number of child tables. In that case, taosBenchmark will read the CSV file data cyclically until the number of child tables specified by childtable_count is generated. Otherwise, taosBenchmark will read the childtable_count rows of tag data only. The final number of child tables generated is the smaller of the two. +#### TSMA configuration parameters + +The configuration parameters for specifying TSMAs are in `tsmas` in `super_tables`. + +- **name**: Specifies TSMA name. Mandatory. + +- **function**: Specifies TSMA function. Mandatory. + +- **interval**: Specifies TSMA interval. Mandatory. + +- **sliding**: Specifies time offset for TSMA window. Mandatory. + +- **custom**: Specifies custom configurations to attach to the end of the TSMA creation statement. Optional. + +- **start_when_inserted**: Specifies the number of inserted rows after which TSMA is started. Optional. The default value is 0. + #### Tag and Data Column Configuration Parameters The configuration parameters for specifying super table tag columns and data columns are in `columns` and `tag` in `super_tables`, respectively. @@ -338,6 +340,8 @@ The configuration parameters for specifying super table tag columns and data col - **values**: The value field of the nchar/binary column/label, which will be chosen randomly from the values. +- **sma**: Insert the column into the BSMA. Enter `yes` or `no`. The default is `no`. + #### insertion behavior configuration parameters - **thread_count**: specify the number of threads to insert data. Default is 8. @@ -350,17 +354,17 @@ The configuration parameters for specifying super table tag columns and data col - **confirm_parameter_prompt**: The switch parameter requires the user to confirm after the prompt to continue. The default value is false. -- **interlace_rows**: Enables interleaved insertion mode and specifies the number of rows of data to be inserted into each child table at a time. Interleaved insertion mode means inserting the number of rows specified by this parameter into each sub-table and repeating the process until all sub-tables are inserted. The default value is 0, which means that data will be inserted into the following child table only after data is inserted into one child table. +- **interlace_rows**: Enables interleaved insertion mode and specifies the number of rows of data to be inserted into each child table at a time. Staggered insertion mode means inserting the number of rows specified by this parameter into each sub-table and repeating the process until all sub-tables have been inserted. The default value is 0, i.e., data is inserted into one sub-table before the next sub-table is inserted. This parameter can also be configured in `super_tables`, and if so, the configuration in `super_tables` takes precedence and overrides the global setting. - **insert_interval** : - Specifies the insertion interval in ms for interleaved insertion mode. The default value is 0. Only works if `-B/--interlace-rows` is greater than 0. It means that after inserting interlace rows for each child table, the data insertion thread will wait for the interval specified by this value before proceeding to the next round of writes. - This parameter can also be configured in `super_tables`, and if configured, the configuration in `super_tables` takes high priority, overriding the global setting. + Specify the insert interval in `ms` for interleaved insert mode. The default value is 0. It only works if `-B/--interlace-rows` is greater than 0. After inserting interlaced rows for each child table, the data insertion thread will wait for the interval specified by this value before proceeding to the next round of writes. + This parameter can also be configured in `super_tables`, and if so, the configuration in `super_tables` takes precedence and overrides the global setting. - **num_of_records_per_req** : - The number of rows of data to be written per request to TDengine, the default value is 30000. When it is set too large, the TDengine client driver will return the corresponding error message, so you need to lower the setting of this parameter to meet the writing requirements. + Writing the number of rows of records per request to TDengine, the default value is 30000. When it is set too large, the TDengine client driver will return the corresponding error message, so you need to lower the setting of this parameter to meet the writing requirements. -- **prepare_rand**: The number of unique values in the generated random data. A value of 1 means that all data are the same. The default value is 10000. +- **prepare_rand**: The number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000. ### Query scenario configuration parameters @@ -388,7 +392,7 @@ The configuration parameters of the super table query are set in `super_table_qu - **threads**: The number of threads to execute the query SQL, the default value is 1. -- **sqls** : The default value is 1. +- **sqls**: - **sql**: The SQL command to be executed. For the query SQL of super table, keep "xxxx" in the SQL command. The program will automatically replace it with all the sub-table names of the super table. Replace it with all the sub-table names in the super table. - **result**: The file to save the query result. If not specified, taosBenchmark will not save result. @@ -411,9 +415,9 @@ The configuration parameters for subscribing to a sub-table or a generic table a - **resubAfterConsume**: "yes" means cancel the previous subscription and then subscribe again, "no" means continue the previous subscription, and the default value is "no". -- **sqls** : The default value is "no". +- **sqls**: - **sql** : The SQL command to be executed, required. - - **result** : The file to save the query result, unspecified is not saved. + - **result**: The file to save the query result. If not specified, taosBenchmark will not save result. #### Configuration parameters for subscribing to supertables @@ -431,7 +435,7 @@ The configuration parameters for subscribing to a super table are set in `super_ - **resubAfterConsume**: "yes" means cancel the previous subscription and then subscribe again, "no" means continue the previous subscription, and the default value is "no". -- **sqls** : The default value is "no". - - **sql**: SQL command to be executed, required; for the query SQL of the super table, keep "xxxx" in the SQL command, and the program will replace it with all the sub-table names of the super table automatically. +- **sqls**: + - **sql**: The SQL command to be executed. For the query SQL of super table, keep "xxxx" in the SQL command. The program will automatically replace it with all the sub-table names of the super table. Replace it with all the sub-table names in the super table. - - **result**: The file to save the query result, if not specified, it will not be saved. + - **result**: The file to save the query result. If not specified, taosBenchmark will not save result. diff --git a/docs/en/14-reference/06-taosdump.md b/docs/en/14-reference/06-taosdump.md index 2105ba83fad9700674e28609016b07ef6de66833..e73441a96b087062b2e3912ed73010fc3e761bb9 100644 --- a/docs/en/14-reference/06-taosdump.md +++ b/docs/en/14-reference/06-taosdump.md @@ -116,5 +116,4 @@ Usage: taosdump [OPTION...] dbname [tbname ...] Mandatory or optional arguments to long options are also mandatory or optional for any corresponding short options. -Report bugs to . ``` diff --git a/docs/en/14-reference/07-tdinsight/index.md b/docs/en/14-reference/07-tdinsight/index.md index e74c9de7b2aa71278a99d45f250e0dcaf86d4704..2e562035254311f2caa0b6d4512842080aab64d5 100644 --- a/docs/en/14-reference/07-tdinsight/index.md +++ b/docs/en/14-reference/07-tdinsight/index.md @@ -263,7 +263,7 @@ Once the import is complete, the full page view of TDinsight is shown below. ## TDinsight dashboard details -The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources [dnodes, mnodes, vnodes](https://www.taosdata.com/cn/documentation/architecture#cluster) or databases. +The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources [dnodes, mnodes, vnodes](../../taos-sql/node/) or databases. Details of the metrics are as follows. diff --git a/docs/en/14-reference/08-taos-shell.md b/docs/en/14-reference/08-taos-shell.md index c947e86d1ce298f32e82b51c991892a9448dc88b..656db1f481250ed4e34e068a02a93b75f0ac0b81 100644 --- a/docs/en/14-reference/08-taos-shell.md +++ b/docs/en/14-reference/08-taos-shell.md @@ -8,7 +8,7 @@ The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is ## Installation -If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI in an environment where no TDengine server is running, the TDengine client installation package needs to be installed first. For details, please refer to [connector](/reference/connector/). +If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI in an environment where no TDengine server is running, the TDengine client installation package needs to be installed first. For details, please refer to [Connector](/reference/connector/). ## Execution @@ -23,6 +23,7 @@ TDengine CLI will display a welcome message and version information if it succes ```cmd taos> ``` + After entering the TDengine CLI, you can execute various SQL commands, including inserts, queries, or administrative commands. ## Execute SQL script file @@ -51,32 +52,33 @@ You can change the behavior of TDengine CLI by specifying command-line parameter - -P PORT: Specify the port number to be used by the server. Default is `6030` - -u USER: the user name to use when connecting. Default is `root` - -p PASSWORD: the password to use when connecting to the server. Default is `taosdata` -- -?, --help: print out all command-line arguments +- -?, --help: print out all command-line arguments And many more parameters. -- -a AUTHSTR: The auth string to use when connecting to the server -- -A: Generate auth string from password +- -a AUTHSTR: Authorization information to connect to the server. +- -A: Obtain authorization information from username and password. - -c CONFIGDIR: Specify the directory where configuration file exists. The default is `/etc/taos`, and the default name of the configuration file in this directory is `taos.cfg` - -C: Print the configuration parameters of `taos.cfg` in the default directory or specified by -c - -d DATABASE: Specify the database to use when connecting to the server -- -f FILE: Execute the SQL script file in non-interactive mode -- -k: Check the service status, 0: unavailable,1: network ok,2: service ok,3: service degraded,4: exiting -- -l PKTLEN: Test package length to be used for network testing -- -n NETROLE: test scope for network connection test, default is `client`. The value can be `client`, `server` -- -N PKTNUM: Test package numbers to be used for network testing +- -f FILE: Execute the SQL script file in non-interactive mode Note that each SQL statement in the script file must be only one line. +- -k: Test the operational status of the server. 0: unavailable; 1: network ok; 2: service ok; 3: service degraded; 4: exiting +- -l PKTLEN: Test package size to be used for network testing +- -n NETROLE: test scope for network connection test, default is `client`. The value can be `client` or `server`. +- -N PKTNUM: Number of packets used for network testing - -r: output the timestamp format as unsigned 64-bits integer (uint64_t in C language) - -s COMMAND: execute SQL commands in non-interactive mode -- -t: Check the details of the service status,status same as -k -- -w DISPLAYWIDTH: 客户端列显示宽度 -- -z, --timezone=TIMEZONE: Specify time zone. Default is the value of current configuration file -- -V, --version: Print out the current version number +- -t: Test the boot status of the server. The statuses of -k apply. +- -w DISPLAYWIDTH: Specify the number of columns of the server display. +- -z TIMEZONE: Specify time zone. Default is the value of current configuration file +- -V: Print out the current version number -Example. +For example: ```bash taos -h h1.taos.com -s "use db; show tables;" ``` + ## TDengine CLI tips - You can use the up and down keys to iterate the history of commands entered diff --git a/docs/en/14-reference/09-support-platform/index.md b/docs/en/14-reference/09-support-platform/index.md index 0bb269f2329097975f665b9318b2ec3f320fca5b..19c984898d9037d4414fe13157060fe744b8e179 100644 --- a/docs/en/14-reference/09-support-platform/index.md +++ b/docs/en/14-reference/09-support-platform/index.md @@ -5,11 +5,10 @@ description: "List of platforms supported by TDengine server, client, and connec ## List of supported platforms for TDengine server -| | **Windows server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18/20** | **UOS** | **kylin** | **Ningsi V60/V80** | -| ------------------ | ---------------------------- | ----------------- | ---------------- | ---------------- | ------- | --------- | ------------------ | -| X64 | ● | ● | ● | ● | ● | ● | ● | -| Raspberry Pi ARM64 | | | ● | | | | | -| HUAWEI Cloud ARM64 | | | | ● | | | | +| | **Windows Server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18/20** | +| ------------ | ---------------------------- | ----------------- | ---------------- | ---------------- | +| X64 | ● | ● | ● | ● | +| ARM64 | | | ● | | Note: ● means officially tested and verified, ○ means unofficially tested and verified. diff --git a/docs/en/14-reference/11-docker/index.md b/docs/en/14-reference/11-docker/index.md index b7e60ab3e7f04a6078950977a563382a3524ebaa..7cd1e810dca010d16b0f2e257d47e012c6ef06cc 100644 --- a/docs/en/14-reference/11-docker/index.md +++ b/docs/en/14-reference/11-docker/index.md @@ -1,6 +1,6 @@ --- title: Deploying TDengine with Docker -Description: "This chapter focuses on starting the TDengine service in a container and accessing it." +description: "This chapter focuses on starting the TDengine service in a container and accessing it." --- This chapter describes how to start the TDengine service in a container and access it. Users can control the behavior of the service in the container by using environment variables on the docker run command-line or in the docker-compose file. @@ -24,13 +24,10 @@ The TDengine client taos can be executed in this container to access TDengine us ```shell $ docker exec -it tdengine taos -Welcome to the TDengine shell from Linux, Client Version:2.4.0.0 -Copyright (c) 2020 by TAOS Data, Inc. - taos> show databases; - name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status | status precision | update | status | -================================================================================================================================== ================================================================================================================================== ================ - log | 2022-01-17 13:57:22.270 | 10 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready | + name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status | +==================================================================================================================================================================================================================================================================================== + log | 2022-01-17 13:57:22.270 | 10 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready | Query OK, 1 row(s) in set (0.002843s) ``` @@ -47,13 +44,10 @@ The above command starts TDengine on the host network and uses the host's FQDN t ```shell $ taos -Welcome to the TDengine shell from Linux, Client Version:2.4.0.0 -Copyright (c) 2020 by TAOS Data, Inc. - taos> show dnodes; - id | end_point | vnodes | cores | status | role | create_time | offline reason | -================================================================================================================================== ==== - 1 | myhost:6030 | 1 | 8 | ready | any | 2022-01-17 22:10:32.619 | | + id | end_point | vnodes | cores | status | role | create_time | offline reason | +====================================================================================================================================== + 1 | myhost:6030 | 1 | 8 | ready | any | 2022-01-17 22:10:32.619 | | Query OK, 1 row(s) in set (0.003233s) ``` @@ -78,7 +72,7 @@ Next, ensure the hostname "tdengine" is resolvable in `/etc/hosts`. echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts ``` -Finally, the TDengine service can be accessed from the taos shell or any connector with "tdengine" as the server address. +Finally, the TDengine service can be accessed from the TDengine CLI or any connector with "tdengine" as the server address. ```shell taos -h tdengine -P 6030 @@ -88,13 +82,13 @@ If set `TAOS_FQDN` to the same hostname, the effect is the same as "Start TDengi ## Start TDengine on the specified network -You can also start TDengine on a specific network. +You can also start TDengine on a specific network. Perform the following steps: 1. First, create a docker network named `td-net` ```shell docker network create td-net - ``` Create td-net + ``` 2. Start TDengine @@ -111,7 +105,7 @@ You can also start TDengine on a specific network. ```shell docker run --rm -it --network td-net -e TAOS_FIRST_EP=tdengine tdengine/tdengine taos # or - # docker run --rm -it --network td-net -e tdengine/tdengine taos -h tdengine + #docker run --rm -it --network td-net -e tdengine/tdengine taos -h tdengine ``` ## Launching a client application in a container @@ -121,8 +115,8 @@ If you want to start your application in a container, you need to add the corres ```docker FROM ubuntu:20.04 RUN apt-get update && apt-get install -y wget -ENV TDENGINE_VERSION=2.4.0.0 -RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ +ENV TDENGINE_VERSION=3.0.0.0 +RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && cd TDengine-client-${TDENGINE_VERSION} \ && ./install_client.sh \ @@ -147,7 +141,7 @@ import ( "fmt" "time" - _ "github.com/taosdata/driver-go/v2/taosSql" + _ "github.com/taosdata/driver-go/v3/taosSql" ) type config struct { @@ -222,8 +216,8 @@ Here is the full Dockerfile: ```docker FROM golang:1.17.6-buster as builder -ENV TDENGINE_VERSION=2.4.0.0 -RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ +ENV TDENGINE_VERSION=3.0.0.0 +RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && cd TDengine-client-${TDENGINE_VERSION} \ && ./install_client.sh \ @@ -238,8 +232,8 @@ RUN go build FROM ubuntu:20.04 RUN apt-get update && apt-get install -y wget -ENV TDENGINE_VERSION=2.4.0.0 -RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ +ENV TDENGINE_VERSION=3.0.0.0 +RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && cd TDengine-client-${TDENGINE_VERSION} \ && ./install_client.sh \ @@ -316,17 +310,17 @@ password: taosdata ``` :::note + - The `VERSION` environment variable is used to set the tdengine image tag - - `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time - - `TAOS_REPLICA` is used to set the default number of database replicas. Its value range is [1,3] - We recommend setting it with `TAOS_ARBITRATOR` to use arbitrator in a two-nodes environment. - - ::: +- `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time +- `TAOS_REPLICA` is used to set the default number of database replicas. Its value range is [1,3] + We recommend setting it with `TAOS_ARBITRATOR` to use arbitrator in a two-nodes environment. + ::: 2. Start the cluster ```shell - $ VERSION=2.4.0.0 docker-compose up -d + $ VERSION=3.0.0.0 docker-compose up -d Creating network "test_default" with the default driver Creating volume "test_taosdata-td1" with default driver Creating volume "test_taoslog-td1" with default driver @@ -353,9 +347,6 @@ password: taosdata ```shell $ docker-compose exec td-1 taos -s "show dnodes" - Welcome to the TDengine shell from Linux, Client Version:2.4.0.0 - Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. - taos> show dnodes id | end_point | vnodes | cores | status | role | create_time | offline reason | ====================================================================================================================================== @@ -371,15 +362,15 @@ password: taosdata 2. At the same time, for flexible deployment, taosAdapter can be started in a separate container - ```docker - services: - # ... - adapter: - image: tdengine/tdengine:$VERSION - command: taosadapter - ```` + ```docker + services: + # ... + adapter: + image: tdengine/tdengine:$VERSION + command: taosadapter + ``` - Suppose you want to deploy multiple taosAdapters to improve throughput and provide high availability. In that case, the recommended configuration method uses a reverse proxy such as Nginx to offer a unified access entry. For specific configuration methods, please refer to the official documentation of Nginx. Here is an example: + Suppose you want to deploy multiple taosAdapters to improve throughput and provide high availability. In that case, the recommended configuration method uses a reverse proxy such as Nginx to offer a unified access entry. For specific configuration methods, please refer to the official documentation of Nginx. Here is an example: ```docker version: "3" @@ -466,7 +457,7 @@ If you want to deploy a container-based TDengine cluster on multiple hosts, you The docker-compose file can refer to the previous section. Here is the command to start TDengine with docker swarm: ```shell -$ VERSION=2.4.0 docker stack deploy -c docker-compose.yml taos +$ VERSION=3.0.0.0 docker stack deploy -c docker-compose.yml taos Creating network taos_inter Creating network taos_api Creating service taos_arbitrator @@ -482,20 +473,20 @@ Checking status: $ docker stack ps taos ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS 79ni8temw59n taos_nginx.1 nginx:latest TM1701 Running Running about a minute ago -3e94u72msiyg taos_adapter.1 tdengine/tdengine:2.4.0 TM1702 Running Running 56 seconds ago -100amjkwzsc6 taos_td-2.1 tdengine/tdengine:2.4.0 TM1703 Running Running about a minute ago -pkjehr2vvaaa taos_td-1.1 tdengine/tdengine:2.4.0 TM1704 Running Running 2 minutes ago -tpzvgpsr1qkt taos_arbitrator.1 tdengine/tdengine:2.4.0 TM1705 Running Running 2 minutes ago -rvss3g5yg6fa taos_adapter.2 tdengine/tdengine:2.4.0 TM1706 Running Running 56 seconds ago -i2augxamfllf taos_adapter.3 tdengine/tdengine:2.4.0 TM1707 Running Running 56 seconds ago -lmjyhzccpvpg taos_adapter.4 tdengine/tdengine:2.4.0 TM1708 Running Running 56 seconds ago +3e94u72msiyg taos_adapter.1 tdengine/tdengine:3.0.0.0 TM1702 Running Running 56 seconds ago +100amjkwzsc6 taos_td-2.1 tdengine/tdengine:3.0.0.0 TM1703 Running Running about a minute ago +pkjehr2vvaaa taos_td-1.1 tdengine/tdengine:3.0.0.0 TM1704 Running Running 2 minutes ago +tpzvgpsr1qkt taos_arbitrator.1 tdengine/tdengine:3.0.0.0 TM1705 Running Running 2 minutes ago +rvss3g5yg6fa taos_adapter.2 tdengine/tdengine:3.0.0.0 TM1706 Running Running 56 seconds ago +i2augxamfllf taos_adapter.3 tdengine/tdengine:3.0.0.0 TM1707 Running Running 56 seconds ago +lmjyhzccpvpg taos_adapter.4 tdengine/tdengine:3.0.0.0 TM1708 Running Running 56 seconds ago $ docker service ls ID NAME MODE REPLICAS IMAGE PORTS -561t4lu6nfw6 taos_adapter replicated 4/4 tdengine/tdengine:2.4.0 -3hk5ct3q90sm taos_arbitrator replicated 1/1 tdengine/tdengine:2.4.0 +561t4lu6nfw6 taos_adapter replicated 4/4 tdengine/tdengine:3.0.0.0 +3hk5ct3q90sm taos_arbitrator replicated 1/1 tdengine/tdengine:3.0.0.0 d8qr52envqzu taos_nginx replicated 1/1 nginx:latest *:6041->6041/tcp, *:6044->6044/udp -2isssfvjk747 taos_td-1 replicated 1/1 tdengine/tdengine:2.4.0 -9pzw7u02ichv taos_td-2 replicated 1/1 tdengine/tdengine:2.4.0 +2isssfvjk747 taos_td-1 replicated 1/1 tdengine/tdengine:3.0.0.0 +9pzw7u02ichv taos_td-2 replicated 1/1 tdengine/tdengine:3.0.0.0 ``` From the above output, you can see two dnodes, two taosAdapters, and one Nginx reverse proxy service. @@ -511,5 +502,5 @@ verify: Service converged $ docker service ls -f name=taos_adapter ID NAME MODE REPLICAS IMAGE PORTS -561t4lu6nfw6 taos_adapter replicated 1/1 tdengine/tdengine:2.4.0 +561t4lu6nfw6 taos_adapter replicated 1/1 tdengine/tdengine:3.0.0.0 ``` diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md index b6b535429b00796b5d2636c467153415a4281e59..02921c3f6a4ce21175504c3c07bd51bb4a3dcf60 100644 --- a/docs/en/14-reference/12-config/index.md +++ b/docs/en/14-reference/12-config/index.md @@ -1,16 +1,13 @@ --- -sidebar_label: Configuration title: Configuration Parameters description: "Configuration parameters for client and server in TDengine" --- -In this chapter, all the configuration parameters on both server and client side are described thoroughly. - ## Configuration File on Server Side On the server side, the actual service of TDengine is provided by an executable `taosd` whose parameters can be configured in file `taos.cfg` to meet the requirements of different use cases. The default location of `taos.cfg` is `/etc/taos`, but can be changed by using `-c` parameter on the CLI of `taosd`. For example, the configuration file can be put under `/home/user` and used like below -```bash +``` taosd -c /home/user ``` @@ -24,8 +21,6 @@ taosd -C TDengine CLI `taos` is the tool for users to interact with TDengine. It can share same configuration file as `taosd` or use a separate configuration file. When launching `taos`, parameter `-c` can be used to specify the location where its configuration file is. For example `taos -c /home/cfg` means `/home/cfg/taos.cfg` will be used. If `-c` is not used, the default location of the configuration file is `/etc/taos`. For more details please use `taos --help` to get. -From version 2.0.10.0 below commands can be used to show the configuration parameters of the client side. - ```bash taos -C ``` @@ -36,6 +31,11 @@ taos --dump-config # Configuration Parameters +:::note +The parameters described in this document by the effect that they have on the system. + +::: + :::note `taosd` needs to be restarted for the parameters changed in the configuration file to take effect. @@ -45,19 +45,19 @@ taos --dump-config ### firstEp -| Attribute | Description | -| ------------- | ---------------------------------------------------------------------------------------------------- | -| Applicable | Server and Client | +| Attribute | Description | +| -------- | -------------------------------------------------------------- | +| Applicable | Server and Client | | Meaning | The end point of the first dnode in the cluster to be connected to when `taosd` or `taos` is started | -| Default Value | localhost:6030 | +| Default | localhost:6030 | ### secondEp -| Attribute | Description | -| ------------- | ---------------------------------------------------------------------------------------------------------------------- | -| Applicable | Server and Client | +| Attribute | Description | +| -------- | ------------------------------------------------------------------------------------- | +| Applicable | Server and Client | | Meaning | The end point of the second dnode to be connected to if the firstEp is not available when `taosd` or `taos` is started | -| Default Value | None | +| Default | None | ### fqdn @@ -65,36 +65,28 @@ taos --dump-config | ------------- | ------------------------------------------------------------------------ | | Applicable | Server Only | | Meaning | The FQDN of the host where `taosd` will be started. It can be IP address | -| Default Value | The first hostname configured for the host | -| Note | It should be within 96 bytes | +| Default Value | The first hostname configured for the host | +| Note | It should be within 96 bytes | | ### serverPort -| Attribute | Description | -| ------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| Applicable | Server Only | +| Attribute | Description | +| -------- | ----------------------------------------------------------------------------------------------------------------------- | +| Applicable | Server Only | | Meaning | The port for external access after `taosd` is started | | Default Value | 6030 | -| Note | REST service is provided by `taosd` before 2.4.0.0 but by `taosAdapter` after 2.4.0.0, the default port of REST service is 6041 | :::note -TDengine uses 13 continuous ports, both TCP and UDP, starting with the port specified by `serverPort`. You should ensure, in your firewall rules, that these ports are kept open. Below table describes the ports used by TDengine in details. - +- Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. Below table describes the ports used by TDengine in details. ::: - | Protocol | Default Port | Description | How to configure | | :------- | :----------- | :----------------------------------------------- | :--------------------------------------------------------------------------------------------- | -| TCP | 6030 | Communication between client and server | serverPort | -| TCP | 6035 | Communication among server nodes in cluster | serverPort+5 | -| TCP | 6040 | Data syncup among server nodes in cluster | serverPort+10 | +| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. serverPort | | TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) | -| TCP | 6042 | Service Port of Arbitrator | The parameter of Arbitrator | -| TCP | 6043 | Service Port of TaosKeeper | The parameter of TaosKeeper | -| TCP | 6044 | Data access port for StatsD | refer to [taosAdapter](/reference/taosadapter/) | -| UDP | 6045 | Data access for statsd | refer to [taosAdapter](/reference/taosadapter/) | -| TCP | 6060 | Port of Monitoring Service in Enterprise version | | -| UDP | 6030-6034 | Communication between client and server | serverPort | -| UDP | 6035-6039 | Communication among server nodes in cluster | serverPort | +| TCP | 6043 | Service Port of TaosKeeper | The parameter of TaosKeeper | +| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters. +| UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters. +| TCP | 6060 | Port of Monitoring Service in Enterprise version | | ### maxShellConns @@ -105,104 +97,109 @@ TDengine uses 13 continuous ports, both TCP and UDP, starting with the port spec | Value Range | 10-50000000 | | Default Value | 5000 | -### maxConnections +## Monitoring Parameters -| Attribute | Description | -| ------------- | ----------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | The maximum number of connections allowed by a database | -| Value Range | 1-100000 | -| Default Value | 5000 | -| Note | The maximum number of worker threads on the client side is maxConnections/100 | +### monitor -### rpcForceTcp +| Attribute | Description | +| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Applicable | Server only | +| Meaning | The switch for monitoring inside server. The main object of monitoring is to collect information about load on physical nodes, including CPU usage, memory usage, disk usage, and network bandwidth. Monitoring information is sent over HTTP to the taosKeeper service specified by `monitorFqdn` and `monitorProt`. +| Value Range | 0: monitoring disabled, 1: monitoring enabled | +| Default | 1 | -| Attribute | Description | -| ------------- | ------------------------------------------------------------------- | -| Applicable | Server and Client | -| Meaning | TCP is used by force | -| Value Range | 0: disabled 1: enabled | -| Default Value | 0 | -| Note | It's suggested to configure to enable if network is not good enough | +### monitorFqdn -## Monitoring Parameters +| Attribute | Description | +| -------- | -------------------------- | +| Applicable | Server Only | +| Meaning | FQDN of taosKeeper monitoring service | +| Default | None | -### monitor +### monitorPort -| Attribute | Description | -| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | The switch for monitoring inside server. The workload of the hosts, including CPU, memory, disk, network, TTP requests, are collected and stored in a system builtin database `LOG` | -| Value Range | 0: monitoring disabled, 1: monitoring enabled | -| Default Value | 1 | +| Attribute | Description | +| -------- | --------------------------- | +| Applicable | Server Only | +| Meaning | Port of taosKeeper monitoring service | +| Default Value | 6043 | ### monitorInterval -| Attribute | Description | -| ------------- | ------------------------------------------ | -| Applicable | Server Only | +| Attribute | Description | +| -------- | -------------------------------------------- | +| Applicable | Server Only | | Meaning | The interval of collecting system workload | | Unit | second | -| Value Range | 1-600 | -| Default Value | 30 | +| Value Range | 1-200000 | +| Default Value | 30 | ### telemetryReporting -| Attribute | Description | -| ------------- | ---------------------------------------------------------------------------- | -| Applicable | Server Only | +| Attribute | Description | +| -------- | ---------------------------------------- | +| Applicable | Server Only | | Meaning | Switch for allowing TDengine to collect and report service usage information | | Value Range | 0: Not allowed; 1: Allowed | -| Default Value | 1 | +| Default Value | 1 | ## Query Parameters -### queryBufferSize +### queryPolicy + +| Attribute | Description | +| -------- | ----------------------------- | +| Applicable | Client only | +| Meaning | Execution policy for query statements | +| Unit | None | +| Default | 1 | +| Notes | 1: Run queries on vnodes and not on qnodes | + +2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes. + +3: Only run scan operators on vnodes; run all other operators on qnodes. + +### querySmaOptimize + +| Attribute | Description | +| -------- | -------------------- | +| Applicable | Client only | +| 含义 | SMA index optimization policy | +| Unit | None | +| Default Value | 0 | +| Notes | -| Attribute | Description | -| ------------- | ---------------------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | The total memory size reserved for all queries | -| Unit | MB | -| Default Value | None | -| Note | It can be estimated by "maximum number of concurrent queries" _ "number of tables" _ 170 | +0: Disable SMA indexing and perform all queries on non-indexed data. -### ratioOfQueryCores +1: Enable SMA indexing and perform queries from suitable statements on precomputation results.| -| Attribute | Description | -| ------------- | ----------------------------------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | Maximum number of query threads | -| Default Value | 1 | -| Note | value range: float number between [0, 2] 0: only 1 query thread; >0: the times of the number of cores | ### maxNumOfDistinctRes -| Attribute | Description | -| ------------- | -------------------------------------------- | -| Applicable | Server Only | +| Attribute | Description | +| -------- | -------------------------------- | --- | +| Applicable | Server Only | | Meaning | The maximum number of distinct rows returned | | Value Range | [100,000 - 100,000,000] | | Default Value | 100,000 | -| Note | After version 2.3.0.0 | ## Locale Parameters ### timezone -| Attribute | Description | -| ------------- | ------------------------------- | -| Applicable | Server and Client | +| Attribute | Description | +| -------- | ------------------------------ | +| Applicable | Server and Client | | Meaning | TimeZone | | Default Value | TimeZone configured in the host | :::info -To handle the data insertion and data query from multiple timezones, Unix Timestamp is used and stored in TDengine. The timestamp generated from any timezones at same time is same in Unix timestamp. To make sure the time on client side can be converted to Unix timestamp correctly, the timezone must be set properly. +To handle the data insertion and data query from multiple timezones, Unix Timestamp is used and stored in TDengine. The timestamp generated from any timezones at same time is same in Unix timestamp. Note that Unix timestamps are converted and recorded on the client side. To make sure the time on client side can be converted to Unix timestamp correctly, the timezone must be set properly. -On Linux system, TDengine clients automatically obtain timezone from the host. Alternatively, the timezone can be configured explicitly in configuration file `taos.cfg` like below. +On Linux system, TDengine clients automatically obtain timezone from the host. Alternatively, the timezone can be configured explicitly in configuration file `taos.cfg` like below. For example: ``` -timezone UTC-7 +timezone UTC-8 timezone GMT-8 timezone Asia/Shanghai ``` @@ -240,11 +237,11 @@ To avoid the problems of using time strings, Unix timestamp can be used directly | Default Value | Locale configured in host | :::info -A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, and Korean. The characters to be stored in nchar type are firstly encoded in UCS4-LE before sending to server side. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly. +A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, and Korean. The characters to be stored in nchar type are firstly encoded in UCS4-LE before sending to server side. Note that the correct encoding is determined by the user. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly. The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux, or GB18030 or GBK on some systems in Chinese, POSIX in docker, CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE. -The locale definition standard on Linux is: \_., for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. On Linux and Mac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset. +The locale definition standard on Linux is: \_., for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. The charset indicates how to display the characters. On Linux and Mac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset. ::: @@ -257,29 +254,37 @@ The locale definition standard on Linux is: \_., f | Default Value | charset set in the system | :::info -On Linux, if `charset` is not set in `taos.cfg`, when `taos` is started, the charset is obtained from system locale. If obtaining charset from system locale fails, `taos` would fail to start. So on Linux system, if system locale is set properly, it's not necessary to set `charset` in `taos.cfg`. For example: +On Linux, if `charset` is not set in `taos.cfg`, when `taos` is started, the charset is obtained from system locale. If obtaining charset from system locale fails, `taos` would fail to start. + +So on Linux system, if system locale is set properly, it's not necessary to set `charset` in `taos.cfg`. For example: ``` locale zh_CN.UTF-8 ``` +On Windows system, it's not possible to obtain charset from system locale. If it's not set in configuration file `taos.cfg`, it would be default to CP936, same as set as below in `taos.cfg`. For example + +``` +charset CP936 +``` + +Refer to the documentation for your operating system before changing the charset. + On a Linux system, if the charset contained in `locale` is not consistent with that set by `charset`, the later setting in the configuration file takes precedence. -```title="Effective charset is GBK" +``` locale zh_CN.UTF-8 charset GBK ``` -```title="Effective charset is UTF-8" +The charset that takes effect is GBK. + +``` charset GBK locale zh_CN.UTF-8 ``` -On Windows system, it's not possible to obtain charset from system locale. If it's not set in configuration file `taos.cfg`, it would be default to CP936, same as set as below in `taos.cfg`. For example - -``` -charset CP936 -``` +The charset that takes effect is UTF-8. ::: @@ -287,429 +292,98 @@ charset CP936 ### dataDir -| Attribute | Description | -| ------------- | ------------------------------------------- | +| Attribute | Description | +| -------- | ------------------------------------------ | | Applicable | Server Only | | Meaning | All data files are stored in this directory | | Default Value | /var/lib/taos | -### cache - -| Attribute | Description | -| ------------- | ----------------------------- | -| Applicable | Server Only | -| Meaning | The size of each memory block | -| Unit | MB | -| Default Value | 16 | - -### blocks - -| Attribute | Description | -| ------------- | -------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | The number of memory blocks of size `cache` used by each vnode | -| Default Value | 6 | - -### days - -| Attribute | Description | -| ------------- | ----------------------------------------------------- | -| Applicable | Server Only | -| Meaning | The time range of the data stored in single data file | -| Unit | day | -| Default Value | 10 | - -### keep - -| Attribute | Description | -| ------------- | -------------------------------------- | -| Applicable | Server Only | -| Meaning | The number of days for data to be kept | -| Unit | day | -| Default Value | 3650 | - -### minRows - -| Attribute | Description | -| ------------- | ------------------------------------------ | -| Applicable | Server Only | -| Meaning | minimum number of rows in single data file | -| Default Value | 100 | - -### maxRows - -| Attribute | Description | -| ------------- | ------------------------------------------ | -| Applicable | Server Only | -| Meaning | maximum number of rows in single data file | -| Default Value | 4096 | - -### walLevel - -| Attribute | Description | -| ------------- | ---------------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | WAL level | -| Value Range | 0: wal disabled
    1: wal enabled without fsync
    2: wal enabled with fsync | -| Default Value | 1 | - -### fsync - -| Attribute | Description | -| ------------- | --------------------------------------------------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | The waiting time for invoking fsync when walLevel is 2 | -| Unit | millisecond | -| Value Range | 0: no waiting time, fsync is performed immediately once WAL is written;
    maximum value is 180000, i.e. 3 minutes | -| Default Value | 3000 | - -### update - -| Attribute | Description | -| ------------- | ------------------------------------------------------------------------------------------------------ | -| Applicable | Server Only | -| Meaning | If it's allowed to update existing data | -| Value Range | 0: not allowed
    1: a row can only be updated as a whole
    2: a part of columns can be updated | -| Default Value | 0 | -| Note | Not available from version 2.0.8.0 | - -### cacheLast - -| Attribute | Description | -| ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | Whether to cache the latest rows of each sub table in memory | -| Value Range | 0: not cached
    1: the last row of each sub table is cached
    2: the last non-null value of each column is cached
    3: identical to both 1 and 2 are set | -| Default Value | 0 | - ### minimalTmpDirGB -| Attribute | Description | -| ------------- | ----------------------------------------------------------------------------------------------- | -| Applicable | Server and Client | +| Attribute | Description | +| -------- | ------------------------------------------------ | +| Applicable | Server and Client | | Meaning | When the available disk space in tmpDir is below this threshold, writing to tmpDir is suspended | -| Unit | GB | -| Default Value | 1.0 | +| Unit | GB | +| Default Value | 1.0 | ### minimalDataDirGB -| Attribute | Description | -| ------------- | ------------------------------------------------------------------------------------------------ | -| Applicable | Server Only | -| Meaning | hen the available disk space in dataDir is below this threshold, writing to dataDir is suspended | -| Unit | GB | -| Default Value | 2.0 | - -### vnodeBak - -| Attribute | Description | -| ------------- | --------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | Whether to backup the corresponding vnode directory when a vnode is deleted | -| Value Range | 0: not backed up, 1: backup | -| Default Value | 1 | +| Attribute | Description | +| -------- | ------------------------------------------------ | +| Applicable | Server Only | +| Meaning | When the available disk space in dataDir is below this threshold, writing to dataDir is suspended | +| Unit | GB | +| Default Value | 2.0 | ## Cluster Parameters -### numOfMnodes - -| Attribute | Description | -| ------------- | ------------------------------ | -| Applicable | Server Only | -| Meaning | The number of management nodes | -| Default Value | 3 | - -### replica - -| Attribute | Description | -| ------------- | -------------------------- | -| Applicable | Server Only | -| Meaning | The number of replications | -| Value Range | 1-3 | -| Default Value | 1 | - -### quorum - -| Attribute | Description | -| ------------- | ------------------------------------------------------------------------------------------ | -| Applicable | Server Only | -| Meaning | The number of required confirmations for data replication in case of multiple replications | -| Value Range | 1,2 | -| Default Value | 1 | - -### role - -| Attribute | Description | -| ------------- | --------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | The role of the dnode | -| Value Range | 0: both mnode and vnode
    1: mnode only
    2: dnode only | -| Default Value | 0 | - -### balance - -| Attribute | Description | -| ------------- | ------------------------ | -| Applicable | Server Only | -| Meaning | Automatic load balancing | -| Value Range | 0: disabled, 1: enabled | -| Default Value | 1 | - -### balanceInterval - -| Attribute | Description | -| ------------- | ----------------------------------------------- | -| Applicable | Server Only | -| Meaning | The interval for checking load balance by mnode | -| Unit | second | -| Value Range | 1-30000 | -| Default Value | 300 | - -### arbitrator +### supportVnodes -| Attribute | Description | -| ------------- | -------------------------------------------------- | -| Applicable | Server Only | -| Meaning | End point of arbitrator, format is same as firstEp | -| Default Value | None | +| Attribute | Description | +| -------- | --------------------------- | +| Applicable | Server Only | +| Meaning | Maximum number of vnodes per dnode | +| Value Range | 0-4096 | +| Default Value | 256 | ## Time Parameters -### precision - -| Attribute | Description | -| ------------- | ------------------------------------------------- | -| Applicable | Server only | -| Meaning | Time precision used for each database | -| Value Range | ms: millisecond; us: microsecond ; ns: nanosecond | -| Default Value | ms | - -### rpcTimer - -| Attribute | Description | -| ------------- | ------------------ | -| Applicable | Server and Client | -| Meaning | rpc retry interval | -| Unit | milliseconds | -| Value Range | 100-3000 | -| Default Value | 300 | - -### rpcMaxTime - -| Attribute | Description | -| ------------- | ---------------------------------- | -| Applicable | Server and Client | -| Meaning | maximum wait time for rpc response | -| Unit | second | -| Value Range | 100-7200 | -| Default Value | 600 | - ### statusInterval -| Attribute | Description | -| ------------- | ----------------------------------------------- | -| Applicable | Server Only | +| Attribute | Description | +| -------- | --------------------------- | +| Applicable | Server Only | | Meaning | the interval of dnode reporting status to mnode | | Unit | second | -| Value Range | 1-10 | -| Default Value | 1 | +| Value Range | 1-10 | +| Default Value | 1 | ### shellActivityTimer -| Attribute | Description | -| ------------- | ------------------------------------------------------ | -| Applicable | Server and Client | -| Meaning | The interval for taos shell to send heartbeat to mnode | -| Unit | second | -| Value Range | 1-120 | -| Default Value | 3 | - -### tableMetaKeepTimer - -| Attribute | Description | -| ------------- | -------------------------------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | The expiration time for metadata in cache, once it's reached the client would refresh the metadata | -| Unit | second | -| Value Range | 1-8640000 | -| Default Value | 7200 | - -### maxTmrCtrl - -| Attribute | Description | -| ------------- | ------------------------ | -| Applicable | Server and Client | -| Meaning | Maximum number of timers | -| Unit | None | -| Value Range | 8-2048 | -| Default Value | 512 | - -### offlineThreshold - -| Attribute | Description | -| ------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | The expiration time for dnode online status, once it's reached before receiving status from a node, the dnode becomes offline | -| Unit | second | -| Value Range | 5-7200000 | -| Default Value | 86400\*10 (i.e. 10 days) | - -## Performance Optimization Parameters - -### numOfThreadsPerCore - -| Attribute | Description | -| ------------- | ------------------------------------------- | -| Applicable | Server and Client | -| Meaning | The number of consumer threads per CPU core | -| Default Value | 1.0 | - -### ratioOfQueryThreads - -| Attribute | Description | -| ------------- | --------------------------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | Maximum number of query threads | -| Value Range | 0: Only one query thread
    1: Same as number of CPU cores
    2: two times of CPU cores | -| Default Value | 1 | -| Note | This value can be a float number, 0.5 means half of the CPU cores | - -### maxVgroupsPerDb - -| Attribute | Description | -| ------------- | ------------------------------------ | -| Applicable | Server Only | -| Meaning | Maximum number of vnodes for each DB | -| Value Range | 0-8192 | -| Default Value | | - -### maxTablesPerVnode - | Attribute | Description | -| ------------- | -------------------------------------- | -| Applicable | Server Only | -| Meaning | Maximum number of tables in each vnode | -| Default Value | 1000000 | - -### minTablesPerVnode - -| Attribute | Description | -| ------------- | -------------------------------------- | -| Applicable | Server Only | -| Meaning | Minimum number of tables in each vnode | -| Default Value | 1000 | - -### tableIncStepPerVnode - -| Attribute | Description | -| ------------- | ------------------------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | When minTablesPerVnode is reached, the number of tables are allocated for a vnode each time | -| Default Value | 1000 | - -### maxNumOfOrderedRes - -| Attribute | Description | -| ------------- | ------------------------------------------- | -| Applicable | Server and Client | -| Meaning | Maximum number of rows ordered for a STable | -| Default Value | 100,000 | - -### mnodeEqualVnodeNum +| -------- | --------------------------------- | +| Applicable | Server and Client | +| Meaning | The interval for TDengine CLI to send heartbeat to mnode | +| Unit | second | +| Value Range | 1-120 | +| Default Value | 3 | -| Attribute | Description | -| ------------- | ----------------------------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | The number of vnodes whose system resources consumption are considered as equal to single mnode | -| Default Value | 4 | +## Performance Optimization Parameters ### numOfCommitThreads -| Attribute | Description | -| ------------- | ----------------------------------------- | -| Applicable | Server Only | +| Attribute | Description | +| -------- | ---------------------- | +| Applicable | Server Only | | Meaning | Maximum of threads for committing to disk | -| Default Value | | +| Default Value | | ## Compression Parameters -### comp - -| Attribute | Description | -| ------------- | ------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | Whether data is compressed | -| Value Range | 0: uncompressed, 1: One phase compression, 2: Two phase compression | -| Default Value | 2 | - -### tsdbMetaCompactRatio - -| Attribute | Description | -| ------------- | ------------------------------------------------------------------------------------------- | -| Meaning | The threshold for percentage of redundant in meta file to trigger compression for meta file | -| Value Range | 0: no compression forever, [1-100]: The threshold percentage | -| Default Value | 0 | - ### compressMsgSize -| Attribute | Description | -| ------------- | -------------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | The threshold for message size to compress the message.. | +| Attribute | Description | +| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Applicable | Server Only | +| Meaning | The threshold for message size to compress the message. | Set the value to 64330 bytes for good message compression. | | Unit | bytes | | Value Range | 0: already compress; >0: compress when message exceeds it; -1: always uncompress | -| Default Value | -1 | +| Default Value | -1 | ### compressColData -| Attribute | Description | -| ------------- | ------------------------------------------------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | The threshold for size of column data to trigger compression for the query result | +| Attribute | Description | +| -------- | --------------------------------------------------------------------------------------- | +| Applicable | Server Only | +| Meaning | The threshold for size of column data to trigger compression for the query result | | Unit | bytes | | Value Range | 0: always compress; >0: only compress when the size of any column data exceeds the threshold; -1: always uncompress | +| Default Value | -1 | | Default Value | -1 | -| Note | available from version 2.3.0.0 | +| Note | available from version 2.3.0.0 | | -### lossyColumns - -| Attribute | Description | -| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | The floating number types for lossy compression | -| Value Range | "": lossy compression is disabled
    float: only for float
    double: only for double
    float \| double: for both float and double | -| Default Value | "" , i.e. disabled | - -### fPrecision - -| Attribute | Description | -| ------------- | ----------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | Compression precision for float type | -| Value Range | 0.1 ~ 0.00000001 | -| Default Value | 0.00000001 | -| Note | The fractional part lower than this value will be discarded | - -### dPrecision - -| Attribute | Description | -| ------------- | ----------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | Compression precision for double type | -| Value Range | 0.1 ~ 0.0000000000000001 | -| Default Value | 0.0000000000000001 | -| Note | The fractional part lower than this value will be discarded | - -## Continuous Query Parameters - -### stream - -| Attribute | Description | -| ------------- | ---------------------------------- | -| Applicable | Server Only | -| Meaning | Whether to enable continuous query | -| Value Range | 0: disabled
    1: enabled | -| Default Value | 1 | +## Continuous Query Parameters | ### minSlidingTime @@ -731,390 +405,444 @@ charset CP936 | Value Range | 1-1000000 | | Default Value | 10 | -### maxStreamCompDelay - -| Attribute | Description | -| ------------- | ------------------------------------------------ | -| Applicable | Server Only | -| Meaning | Maximum delay before starting a continuous query | -| Unit | millisecond | -| Value Range | 10-1000000000 | -| Default Value | 20000 | - -### maxFirstStreamCompDelay - -| Attribute | Description | -| ------------- | -------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | Maximum delay time before starting a continuous query the first time | -| Unit | millisecond | -| Value Range | 10-1000000000 | -| Default Value | 10000 | - -### retryStreamCompDelay - -| Attribute | Description | -| ------------- | --------------------------------------------- | -| Applicable | Server Only | -| Meaning | Delay time before retrying a continuous query | -| Unit | millisecond | -| Value Range | 10-1000000000 | -| Default Value | 10 | - -### streamCompDelayRatio - -| Attribute | Description | -| ------------- | ------------------------------------------------------------------------ | -| Applicable | Server Only | -| Meaning | The delay ratio, with time window size as the base, for continuous query | -| Value Range | 0.1-0.9 | -| Default Value | 0.1 | - :::info To prevent system resource from being exhausted by multiple concurrent streams, a random delay is applied on each stream automatically. `maxFirstStreamCompDelay` is the maximum delay time before a continuous query is started the first time. `streamCompDelayRatio` is the ratio for calculating delay time, with the size of the time window as base. `maxStreamCompDelay` is the maximum delay time. The actual delay time is a random time not bigger than `maxStreamCompDelay`. If a continuous query fails, `retryStreamComDelay` is the delay time before retrying it, also not bigger than `maxStreamCompDelay`. ::: -## HTTP Parameters - -:::note -HTTP service was provided by `taosd` prior to version 2.4.0.0 and is provided by `taosAdapter` after version 2.4.0.0. -The parameters described in this section are only application in versions prior to 2.4.0.0. If you are using any version from 2.4.0.0, please refer to [taosAdapter](/reference/taosadapter/). - -::: - -### http - -| Attribute | Description | -| ------------- | ------------------------------ | -| Applicable | Server Only | -| Meaning | Whether to enable http service | -| Value Range | 0: disabled, 1: enabled | -| Default Value | 1 | - -### httpEnableRecordSql - -| Attribute | Description | -| ------------- | ------------------------------------------------------------------------- | -| Applicable | Server Only | -| Meaning | Whether to record the SQL invocation through REST interface | -| Default Value | 0: false; 1: true | -| Note | The resulting files, i.e. httpnote.0/httpnote.1, are located under logDir | - -### httpMaxThreads - -| Attribute | Description | -| ------------- | -------------------------------------------- | -| Applicable | Server Only | -| Meaning | The number of threads for RESTFul interface. | -| Default Value | 2 | - -### restfulRowLimit - -| Attribute | Description | -| ------------- | ------------------------------------------------------------ | -| Applicable | Server Only | -| Meaning | Maximum number of rows returned each time by REST interface. | -| Default Value | 10240 | -| Note | Maximum value is 10,000,000 | - -### httpDBNameMandatory - -| Attribute | Description | -| ------------- | ---------------------------------------- | -| Applicable | Server Only | -| Meaning | Whether database name is required in URL | -| Value Range | 0:not required, 1: required | -| Default Value | 0 | -| Note | From version 2.3.0.0 | - ## Log Parameters ### logDir -| Attribute | Description | -| ------------- | ----------------------------------- | -| Applicable | Server and Client | +| Attribute | Description | +| -------- | -------------------------------------------------- | +| Applicable | Server and Client | | Meaning | The directory for writing log files | | Default Value | /var/log/taos | ### minimalLogDirGB -| Attribute | Description | -| ------------- | -------------------------------------------------------------------------------------------------- | -| Applicable | Server and Client | +| Attribute | Description | +| -------- | -------------------------------------------- | +| Applicable | Server and Client | | Meaning | When the available disk space in logDir is below this threshold, writing to log files is suspended | -| Unit | GB | -| Default Value | 1.0 | +| Unit | GB | +| Default Value | 1.0 | ### numOfLogLines -| Attribute | Description | -| ------------- | ------------------------------------------ | -| Applicable | Server and Client | +| Attribute | Description | +| -------- | ---------------------------- | +| Applicable | Server and Client | | Meaning | Maximum number of lines in single log file | -| Default Value | 10,000,000 | +| Default Value | 10000000 | ### asyncLog -| Attribute | Description | -| ------------- | ---------------------------- | -| Applicable | Server and Client | +| Attribute | Description | +| -------- | -------------------- | +| Applicable | Server and Client | | Meaning | The mode of writing log file | | Value Range | 0: sync way; 1: async way | -| Default Value | 1 | +| Default Value | 1 | ### logKeepDays -| Attribute | Description | -| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | -| Applicable | Server and Client | +| Attribute | Description | +| -------- | ----------------------------------------------------------------------------------- | +| Applicable | Server and Client | | Meaning | The number of days for log files to be kept | -| Unit | day | -| Default Value | 0 | +| Unit | day | +| Default Value | 0 | | Note | When it's bigger than 0, the log file would be renamed to "taosdlog.xxx" in which "xxx" is the timestamp when the file is changed last time | ### debugFlag -| Attribute | Description | -| ------------- | --------------------------------------------------------- | -| Applicable | Server and Client | +| Attribute | Description | +| -------- | ------------------------------------------------------------------------------------------------- | +| Applicable | Server and Client | | Meaning | Log level | | Value Range | 131: INFO/WARNING/ERROR; 135: plus DEBUG; 143: plus TRACE | | Default Value | 131 or 135, depending on the module | -### mDebugFlag +### tmrDebugFlag -| Attribute | Description | -| ------------- | ------------------ | -| Applicable | Server Only | -| Meaning | Log level of mnode | +| Attribute | Description | +| -------- | -------------------- | +| Applicable | Server and Client | +| Meaning | Log level of timer module | | Value Range | same as debugFlag | -| Default Value | 135 | +| Default Value | | -### dDebugFlag +### uDebugFlag -| Attribute | Description | -| ------------- | ------------------ | -| Applicable | Server and Client | -| Meaning | Log level of dnode | +| Attribute | Description | +| -------- | ---------------------- | +| Applicable | Server and Client | +| Meaning | Log level of common module | | Value Range | same as debugFlag | -| Default Value | 135 | - -### sDebugFlag - -| Attribute | Description | -| ------------- | ------------------------ | -| Applicable | Server and Client | -| Meaning | Log level of sync module | -| Value Range | same as debugFlag | -| Default Value | 135 | - -### wDebugFlag - -| Attribute | Description | -| ------------- | ----------------------- | -| Applicable | Server and Client | -| Meaning | Log level of WAL module | -| Value Range | same as debugFlag | -| Default Value | 135 | - -### sdbDebugFlag - -| Attribute | Description | -| ------------- | ---------------------- | -| Applicable | Server and Client | -| Meaning | logLevel of sdb module | -| Value Range | same as debugFlag | -| Default Value | 135 | +| Default Value | | ### rpcDebugFlag -| Attribute | Description | -| ------------- | ----------------------- | -| Applicable | Server and Client | +| Attribute | Description | +| -------- | -------------------- | +| Applicable | Server and Client | | Meaning | Log level of rpc module | -| Value Range | Same as debugFlag | -| Default Value | | +| Value Range | same as debugFlag | +| Default Value | | -### tmrDebugFlag +### jniDebugFlag -| Attribute | Description | -| ------------- | ------------------------- | +| Attribute | Description | +| -------- | ------------------ | +| Applicable | Client Only | +| Meaning | Log level of jni module | +| Value Range | same as debugFlag | +| Default Value | | + +### qDebugFlag + +| Attribute | Description | +| -------- | -------------------- | | Applicable | Server and Client | -| Meaning | Log level of timer module | -| Value Range | Same as debugFlag | -| Default Value | | +| Meaning | Log level of query module | +| Value Range | same as debugFlag | +| Default Value | | ### cDebugFlag -| Attribute | Description | -| ------------- | ------------------- | +| Attribute | Description | +| -------- | --------------------- | | Applicable | Client Only | | Meaning | Log level of Client | -| Value Range | Same as debugFlag | -| Default Value | | - -### jniDebugFlag - -| Attribute | Description | -| ------------- | ----------------------- | -| Applicable | Client Only | -| Meaning | Log level of jni module | -| Value Range | Same as debugFlag | -| Default Value | | - -### odbcDebugFlag - -| Attribute | Description | -| ------------- | ------------------------ | -| Applicable | Client Only | -| Meaning | Log level of odbc module | -| Value Range | Same as debugFlag | -| Default Value | | +| Value Range | same as debugFlag | +| Default Value | | -### uDebugFlag +### dDebugFlag -| Attribute | Description | -| ------------- | -------------------------- | -| Applicable | Server and Client | -| Meaning | Log level of common module | -| Value Range | Same as debugFlag | -| Default Value | | +| Attribute | Description | +| -------- | -------------------- | +| Applicable | Server Only | +| Meaning | Log level of dnode | +| Value Range | same as debugFlag | +| Default Value | 135 | -### httpDebugFlag +### vDebugFlag -| Attribute | Description | -| ------------- | ------------------------------------------- | -| Applicable | Server Only | -| Meaning | Log level of http module (prior to 2.4.0.0) | -| Value Range | Same as debugFlag | -| Default Value | | +| Attribute | Description | +| -------- | -------------------- | +| Applicable | Server Only | +| Meaning | Log level of vnode | +| Value Range | same as debugFlag | +| Default Value | | -### mqttDebugFlag +### mDebugFlag -| Attribute | Description | -| ------------- | ------------------------ | -| Applicable | Server Only | -| Meaning | Log level of mqtt module | -| Value Range | Same as debugFlag | -| Default Value | | +| Attribute | Description | +| -------- | -------------------- | +| Applicable | Server Only | +| Meaning | Log level of mnode module | +| Value Range | same as debugFlag | +| Default Value | 135 | -### monitorDebugFlag +### wDebugFlag -| Attribute | Description | -| ------------- | ------------------------------ | -| Applicable | Server Only | -| Meaning | Log level of monitoring module | -| Value Range | Same as debugFlag | -| Default Value | | +| Attribute | Description | +| -------- | ------------------ | +| Applicable | Server Only | +| Meaning | Log level of WAL module | +| Value Range | same as debugFlag | +| Default Value | 135 | -### qDebugFlag +### sDebugFlag -| Attribute | Description | -| ------------- | ------------------------- | +| Attribute | Description | +| -------- | -------------------- | | Applicable | Server and Client | -| Meaning | Log level of query module | -| Value Range | Same as debugFlag | -| Default Value | | - -### vDebugFlag - -| Attribute | Description | -| ------------- | ------------------ | -| Applicable | Server and Client | -| Meaning | Log level of vnode | -| Value Range | Same as debugFlag | -| Default Value | | +| Meaning | Log level of sync module | +| Value Range | same as debugFlag | +| Default Value | 135 | ### tsdbDebugFlag -| Attribute | Description | -| ------------- | ------------------------ | -| Applicable | Server Only | -| Meaning | Log level of TSDB module | -| Value Range | Same as debugFlag | -| Default Value | | +| Attribute | Description | +| -------- | ------------------- | +| Applicable | Server Only | +| Meaning | Log level of TSDB module | +| Value Range | same as debugFlag | +| Default Value | | -### cqDebugFlag +### tqDebugFlag | Attribute | Description | -| ------------- | ------------------------------------ | -| Applicable | Server and Client | -| Meaning | Log level of continuous query module | -| Value Range | Same as debugFlag | -| Default Value | | +| -------- | ----------------- | +| Applicable | Server only | +| Meaning | Log level of TQ module | +| Value Range | same as debugFlag | +| Default Value | | -## Client Only +### fsDebugFlag -### maxSQLLength +| Attribute | Description | +| -------- | ----------------- | +| Applicable | Server only | +| Meaning | Log level of FS module | +| Value Range | same as debugFlag | +| Default Value | | + +### udfDebugFlag | Attribute | Description | -| ------------- | -------------------------------------- | -| Applicable | Client Only | -| Meaning | Maximum length of single SQL statement | -| Unit | bytes | -| Value Range | 65480-1048576 | -| Default Value | 1048576 | +| -------- | ------------------ | +| Applicable | Server Only | +| Meaning | Log level of UDF module | +| Value Range | same as debugFlag | +| Default Value | | + +### smaDebugFlag -### tscEnableRecordSql +| Attribute | Description | +| -------- | ------------------ | +| Applicable | Server Only | +| Meaning | Log level of SMA module | +| Value Range | same as debugFlag | +| Default Value | | -| Attribute | Description | -| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| Meaning | Whether to record SQL statements in file | -| Value Range | 0: false, 1: true | -| Default Value | 0 | -| Note | The generated files are named as "tscnote-xxxx.0/tscnote-xxx.1" in which "xxxx" is the pid of the client, and located at same place as client log | +### idxDebugFlag -### maxBinaryDisplayWidth +| Attribute | Description | +| -------- | -------------------- | +| Applicable | Server Only | +| Meaning | Log level of index module | +| Value Range | same as debugFlag | +| Default Value | | -| Attribute | Description | -| ------------- | --------------------------------------------------------------------------------------------------- | -| Meaning | Maximum display width of binary and nchar in taos shell. Anything beyond this limit would be hidden | -| Value Range | 5 - | -| Default Value | 30 | +### tdbDebugFlag -:::info -If the length of value exceeds `maxBinaryDisplayWidth`, then the actual display width is max(column name, maxBinaryDisplayLength); otherwise the actual display width is max(length of column name, length of column value). This parameter can also be changed dynamically using `set max_binary_display_width ` in TDengine CLI `taos`. +| Attribute | Description | +| -------- | ------------------ | +| Applicable | Server Only | +| Meaning | Log level of TDB module | +| Value Range | same as debugFlag | +| Default Value | | -::: +## Schemaless Parameters -### maxWildCardsLength +### smlChildTableName -| Attribute | Description | -| ------------- | ----------------------------------------------------- | -| Meaning | The maximum length for wildcard string used with LIKE | -| Unit | bytes | -| Value Range | 0-16384 | -| Default Value | 100 | -| Note | From version 2.1.6.1 | +| Attribute | Description | +| -------- | ------------------------- | +| Applicable | Client only | +| Meaning | Custom subtable name for schemaless writes | +| Type | String | +| Default Value | None | -### clientMerge +### smlTagName -| Attribute | Description | -| ------------- | --------------------------------------------------- | -| Meaning | Whether to filter out duplicate data on client side | -| Value Range | 0: false; 1: true | -| Default Value | 0 | -| Note | From version 2.3.0.0 | +| Attribute | Description | +| -------- | ------------------------------------ | +| Applicable | Client only | +| Meaning | Default tag for schemaless writes without tag value specified | +| Type | String | +| Default Value | _tag_null | -### maxRegexStringLen +### smlDataFormat | Attribute | Description | -| ------------- | ------------------------------------ | -| Meaning | Maximum length of regular expression | -| Value Range | [128, 16384] | -| Default Value | 128 | -| Note | From version 2.3.0.0 | +| -------- | ----------------------------- | +| Applicable | Client only | +| Meaning | Whether schemaless columns are consistently ordered | +| Value Range | 0: not consistent; 1: consistent. | +| Default | 1 | ## Other Parameters ### enableCoreFile -| Attribute | Description | -| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Attribute | Description | +| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ | | Applicable | Server and Client | | Meaning | Whether to generate core file when server crashes | | Value Range | 0: false, 1: true | | Default Value | 1 | | Note | The core file is generated under root directory `systemctl start taosd` is used to start, or under the working directory if `taosd` is started directly on Linux Shell. | + +### udf + +| Attribute | Description | +| -------- | ------------------ | +| Applicable | Server Only | +| Meaning | Whether the UDF service is enabled | +| Value Range | 0: disable UDF; 1: enabled UDF | +| Default Value | 1 | + +## Parameter Comparison of TDengine 2.x and 3.0 +| # | **Parameter** | **In 2.x** | **In 3.0** | +| --- | :-----------------: | --------------- | --------------- | +| 1 | firstEp | Yes | Yes | +| 2 | secondEp | Yes | Yes | +| 3 | fqdn | Yes | Yes | +| 4 | serverPort | Yes | Yes | +| 5 | maxShellConns | Yes | Yes | +| 6 | monitor | Yes | Yes | +| 7 | monitorFqdn | No | Yes | +| 8 | monitorPort | No | Yes | +| 9 | monitorInterval | Yes | Yes | +| 10 | monitorMaxLogs | No | Yes | +| 11 | monitorComp | No | Yes | +| 12 | telemetryReporting | Yes | Yes | +| 13 | telemetryInterval | No | Yes | +| 14 | telemetryServer | No | Yes | +| 15 | telemetryPort | No | Yes | +| 16 | queryPolicy | No | Yes | +| 17 | querySmaOptimize | No | Yes | +| 18 | queryBufferSize | Yes | Yes | +| 19 | maxNumOfDistinctRes | Yes | Yes | +| 20 | minSlidingTime | Yes | Yes | +| 21 | minIntervalTime | Yes | Yes | +| 22 | countAlwaysReturnValue | Yes | Yes | +| 23 | dataDir | Yes | Yes | +| 24 | minimalDataDirGB | Yes | Yes | +| 25 | supportVnodes | No | Yes | +| 26 | tempDir | Yes | Yes | +| 27 | minimalTmpDirGB | Yes | Yes | +| 28 | compressMsgSize | Yes | Yes | +| 29 | compressColData | Yes | Yes | +| 30 | smlChildTableName | Yes | Yes | +| 31 | smlTagName | Yes | Yes | +| 32 | smlDataFormat | No | Yes | +| 33 | statusInterval | Yes | Yes | +| 34 | shellActivityTimer | Yes | Yes | +| 35 | transPullupInterval | No | Yes | +| 36 | mqRebalanceInterval | No | Yes | +| 37 | ttlUnit | No | Yes | +| 38 | ttlPushInterval | No | Yes | +| 39 | numOfTaskQueueThreads | No | Yes | +| 40 | numOfRpcThreads | No | Yes | +| 41 | numOfCommitThreads | Yes | Yes | +| 42 | numOfMnodeReadThreads | No | Yes | +| 43 | numOfVnodeQueryThreads | No | Yes | +| 44 | numOfVnodeStreamThreads | No | Yes | +| 45 | numOfVnodeFetchThreads | No | Yes | +| 46 | numOfVnodeWriteThreads | No | Yes | +| 47 | numOfVnodeSyncThreads | No | Yes | +| 48 | numOfQnodeQueryThreads | No | Yes | +| 49 | numOfQnodeFetchThreads | No | Yes | +| 50 | numOfSnodeSharedThreads | No | Yes | +| 51 | numOfSnodeUniqueThreads | No | Yes | +| 52 | rpcQueueMemoryAllowed | No | Yes | +| 53 | logDir | Yes | Yes | +| 54 | minimalLogDirGB | Yes | Yes | +| 55 | numOfLogLines | Yes | Yes | +| 56 | asyncLog | Yes | Yes | +| 57 | logKeepDays | Yes | Yes | +| 58 | debugFlag | Yes | Yes | +| 59 | tmrDebugFlag | Yes | Yes | +| 60 | uDebugFlag | Yes | Yes | +| 61 | rpcDebugFlag | Yes | Yes | +| 62 | jniDebugFlag | Yes | Yes | +| 63 | qDebugFlag | Yes | Yes | +| 64 | cDebugFlag | Yes | Yes | +| 65 | dDebugFlag | Yes | Yes | +| 66 | vDebugFlag | Yes | Yes | +| 67 | mDebugFlag | Yes | Yes | +| 68 | wDebugFlag | Yes | Yes | +| 69 | sDebugFlag | Yes | Yes | +| 70 | tsdbDebugFlag | Yes | Yes | +| 71 | tqDebugFlag | No | Yes | +| 72 | fsDebugFlag | Yes | Yes | +| 73 | udfDebugFlag | No | Yes | +| 74 | smaDebugFlag | No | Yes | +| 75 | idxDebugFlag | No | Yes | +| 76 | tdbDebugFlag | No | Yes | +| 77 | metaDebugFlag | No | Yes | +| 78 | timezone | Yes | Yes | +| 79 | locale | Yes | Yes | +| 80 | charset | Yes | Yes | +| 81 | udf | Yes | Yes | +| 82 | enableCoreFile | Yes | Yes | +| 83 | arbitrator | Yes | No | +| 84 | numOfThreadsPerCore | Yes | No | +| 85 | numOfMnodes | Yes | No | +| 86 | vnodeBak | Yes | No | +| 87 | balance | Yes | No | +| 88 | balanceInterval | Yes | No | +| 89 | offlineThreshold | Yes | No | +| 90 | role | Yes | No | +| 91 | dnodeNopLoop | Yes | No | +| 92 | keepTimeOffset | Yes | No | +| 93 | rpcTimer | Yes | No | +| 94 | rpcMaxTime | Yes | No | +| 95 | rpcForceTcp | Yes | No | +| 96 | tcpConnTimeout | Yes | No | +| 97 | syncCheckInterval | Yes | No | +| 98 | maxTmrCtrl | Yes | No | +| 99 | monitorReplica | Yes | No | +| 100 | smlTagNullName | Yes | No | +| 101 | keepColumnName | Yes | No | +| 102 | ratioOfQueryCores | Yes | No | +| 103 | maxStreamCompDelay | Yes | No | +| 104 | maxFirstStreamCompDelay | Yes | No | +| 105 | retryStreamCompDelay | Yes | No | +| 106 | streamCompDelayRatio | Yes | No | +| 107 | maxVgroupsPerDb | Yes | No | +| 108 | maxTablesPerVnode | Yes | No | +| 109 | minTablesPerVnode | Yes | No | +| 110 | tableIncStepPerVnode | Yes | No | +| 111 | cache | Yes | No | +| 112 | blocks | Yes | No | +| 113 | days | Yes | No | +| 114 | keep | Yes | No | +| 115 | minRows | Yes | No | +| 116 | maxRows | Yes | No | +| 117 | quorum | Yes | No | +| 118 | comp | Yes | No | +| 119 | walLevel | Yes | No | +| 120 | fsync | Yes | No | +| 121 | replica | Yes | No | +| 122 | partitions | Yes | No | +| 123 | quorum | Yes | No | +| 124 | update | Yes | No | +| 125 | cachelast | Yes | No | +| 126 | maxSQLLength | Yes | No | +| 127 | maxWildCardsLength | Yes | No | +| 128 | maxRegexStringLen | Yes | No | +| 129 | maxNumOfOrderedRes | Yes | No | +| 130 | maxConnections | Yes | No | +| 131 | mnodeEqualVnodeNum | Yes | No | +| 132 | http | Yes | No | +| 133 | httpEnableRecordSql | Yes | No | +| 134 | httpMaxThreads | Yes | No | +| 135 | restfulRowLimit | Yes | No | +| 136 | httpDbNameMandatory | Yes | No | +| 137 | httpKeepAlive | Yes | No | +| 138 | enableRecordSql | Yes | No | +| 139 | maxBinaryDisplayWidth | Yes | No | +| 140 | stream | Yes | No | +| 141 | retrieveBlockingModel | Yes | No | +| 142 | tsdbMetaCompactRatio | Yes | No | +| 143 | defaultJSONStrType | Yes | No | +| 144 | walFlushSize | Yes | No | +| 145 | keepTimeOffset | Yes | No | +| 146 | flowctrl | Yes | No | +| 147 | slaveQuery | Yes | No | +| 148 | adjustMaster | Yes | No | +| 149 | topicBinaryLen | Yes | No | +| 150 | telegrafUseFieldNum | Yes | No | +| 151 | deadLockKillQuery | Yes | No | +| 152 | clientMerge | Yes | No | +| 153 | sdbDebugFlag | Yes | No | +| 154 | odbcDebugFlag | Yes | No | +| 155 | httpDebugFlag | Yes | No | +| 156 | monDebugFlag | Yes | No | +| 157 | cqDebugFlag | Yes | No | +| 158 | shortcutFlag | Yes | No | +| 159 | probeSeconds | Yes | No | +| 160 | probeKillSeconds | Yes | No | +| 161 | probeInterval | Yes | No | +| 162 | lossyColumns | Yes | No | +| 163 | fPrecision | Yes | No | +| 164 | dPrecision | Yes | No | +| 165 | maxRange | Yes | No | +| 166 | range | Yes | No | diff --git a/docs/en/14-reference/12-directory.md b/docs/en/14-reference/12-directory.md index 118bce8037fdae5b303b45988277d10a99aa5445..19b036418f18637bfd21fa286f24528c649d146d 100644 --- a/docs/en/14-reference/12-directory.md +++ b/docs/en/14-reference/12-directory.md @@ -29,10 +29,7 @@ All executable files of TDengine are in the _/usr/local/taos/bin_ directory by d - _set_core.sh_: script for setting up the system to generate core dump files for easy debugging - _taosd-dump-cfg.gdb_: script to facilitate debugging of taosd's gdb execution. -:::note -taosdump after version 2.4.0.0 require taosTools as a standalone installation. A new version of taosBenchmark is include in taosTools too. -::: - :::tip You can configure different data directories and log directories by modifying the system configuration file `taos.cfg`. + ::: diff --git a/docs/en/14-reference/13-schemaless/13-schemaless.md b/docs/en/14-reference/13-schemaless/13-schemaless.md index 8b6a26ae52af42e339e2f5a8d0824a9e1be3f386..4f50c38cbbfda9d8d8567517f9109f18e2007988 100644 --- a/docs/en/14-reference/13-schemaless/13-schemaless.md +++ b/docs/en/14-reference/13-schemaless/13-schemaless.md @@ -1,9 +1,10 @@ --- title: Schemaless Writing -description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface." +description: 'The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface.' --- -In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. To provide the flexibility needed in such cases and in a rapidly changing IoT landscape, TDengine provides a series of interfaces for the schemaless writing method. These interfaces eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. When necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly. +In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. Schemaless writing automatically creates storage structures for your data as it is being written to TDengine, so that you do not need to create supertables in advance. When necessary, schemaless writing +will automatically add the required columns to ensure that the data written by the user is stored correctly. The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from the super tables and subtables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and they lack readability. @@ -19,12 +20,12 @@ With the following formatting conventions, schemaless writing uses a single stri measurement,tag_set field_set timestamp ``` -where : +where: - measurement will be used as the data table name. It will be separated from tag_set by a comma. -- tag_set will be used as tag data in the format `=,=`, i.e. multiple tags' data can be separated by a comma. It is separated from field_set by space. -- field_set will be used as normal column data in the format of `=,=`, again using a comma to separate multiple normal columns of data. It is separated from the timestamp by a space. -- The timestamp is the primary key corresponding to the data in this row. +- `tag_set` will be used as tags, with format like `=,=` Enter a space between `tag_set` and `field_set`. +- `field_set`will be used as data columns, with format like `=,=` Enter a space between `field_set` and `timestamp`. +- `timestamp` is the primary key timestamp corresponding to this row of data All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes ("). @@ -35,18 +36,20 @@ In the schemaless writing data line protocol, each data item in the field_set ne - Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\\) in front. (All refer to the ASCII character) - Numeric types will be distinguished from data types by the suffix. -| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** | -| -------- | -------- | ------------ | -------------- | -| 1 | none or f64 | double | 8 | -| 2 | f32 | float | 4 | -| 3 | i8/u8 | TinyInt/UTinyInt | 1 | -| 4 | i16/u16 | SmallInt/USmallInt | 2 | -| 5 | i32/u32 | Int/UInt | 4 | -| 6 | i64/i/u64/u | Bigint/Bigint/UBigint/UBigint | 8 | +| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** | +| ----------------- | ----------- | ----------------------------- | ---------------- | +| 1 | None or f64 | double | 8 | +| 2 | f32 | float | 4 | +| 3 | i8/u8 | TinyInt/UTinyInt | 1 | +| 4 | i16/u16 | SmallInt/USmallInt | 2 | +| 5 | i32/u32 | Int/UInt | 4 | +| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 | - `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types. -For example, the following data rows indicate that the t1 label is "3" (NCHAR), the t2 label is "4" (NCHAR), and the t3 label is "t3" to the super table named `st` labeled "t3" (NCHAR), write c1 column as 3 (BIGINT), c2 column as false (BOOL), c3 column is "passit" (BINARY), c4 column is 4 (DOUBLE), and the primary key timestamp is 1626006833639000000 in one row. +For example, the following data rows indicate that the t1 label is "3" (NCHAR), the t2 label is "4" (NCHAR), and the t3 label +is "t3" to the super table named `st` labeled "t3" (NCHAR), write c1 column as 3 (BIGINT), c2 column as false (BOOL), c3 column +is "passit" (BINARY), c4 column is 4 (DOUBLE), and the primary key timestamp is 1626006833639000000 in one row. ```json st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 @@ -58,102 +61,105 @@ Note that if the wrong case is used when describing the data type suffix, or if Schemaless writes process row data according to the following principles. -1. You can use the following rules to generate the subtable names: first, combine the measurement name and the key and value of the label into the next string: +1. You can use the following rules to generate the subtable names: first, combine the measurement name and the key and value of the label into the next string: ```json "measurement,tag_key1=tag_value1,tag_key2=tag_value2" ``` Note that tag_key1, tag_key2 are not the original order of the tags entered by the user but the result of using the tag names in ascending order of the strings. Therefore, tag_key1 is not the first tag entered in the line protocol. -The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t*" is a fixed prefix that every table generated by this mapping relationship has. +The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t_" is a fixed prefix that every table generated by this mapping relationship has. +You can configure smlChildTableName to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpul,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored. 2. If the super table obtained by parsing the line protocol does not exist, this super table is created. -If the subtable obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the subtable name determined in steps 1 or 2. +3. If the subtable obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the subtable name determined in steps 1 or 2. 4. If the specified tag or regular column in the data row does not exist, the corresponding tag or regular column is added to the super table (only incremental). -5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to NULL. +5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to + NULL. 6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data. 7. Errors encountered throughout the processing will interrupt the writing process and return an error code. -8. In order to improve the efficiency of writing, it is assumed by default that the order of the fields in the same Super is the same (the first data contains all fields, and the following data is in this order). If the order is different, the parameter smlDataFormat needs to be configured to be false. Otherwise, the data is written in the same order, and the data in the library will be abnormal. +8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat to false. Otherwise, data will be written out of order and a database error will occur. :::tip -All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 16k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area. +All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed +16KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area. + ::: ## Time resolution recognition Three specified modes are supported in the schemaless writing process, as follows: -| **Serial** | **Value** | **Description** | -| -------- | ------------------- | ------------------------------- | -| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol | -| 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol | -| 3 | SML_JSON_PROTOCOL | JSON protocol format | - -In the SML_LINE_PROTOCOL parsing mode, the user is required to specify the time resolution of the input timestamp. The available time resolutions are shown in the following table. +| **Serial** | **Value** | **Description** | +| ---------- | ------------------- | ---------------------- | +| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol | +| 2 | SML_TELNET_PROTOCOL | OpenTSDB file protocol | +| 3 | SML_JSON_PROTOCOL | OpenTSDB JSON protocol | -| **Serial Number** | **Time Resolution Definition** | **Meaning** | -| -------- | --------------------------------- | -------------- | -| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | Not defined (invalid) | -| 2 | TSDB_SML_TIMESTAMP_HOURS | hour | -| 3 | TSDB_SML_TIMESTAMP_MINUTES | MINUTES -| 4 | TSDB_SML_TIMESTAMP_SECONDS | SECONDS -| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | milliseconds -| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | microseconds -| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | nanoseconds | +In InfluxDB line protocol mode, you must specify the precision of the input timestamp. Valid precisions are described in the following table. -In SML_TELNET_PROTOCOL and SML_JSON_PROTOCOL modes, the time precision is determined based on the length of the timestamp (in the same way as the OpenTSDB standard operation), and the user-specified time resolution is ignored at this point. +| **No.** | **Precision** | **Description** | +| ------- | --------------------------------- | --------------------- | +| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | Not defined (invalid) | +| 2 | TSDB_SML_TIMESTAMP_HOURS | Hours | +| 3 | TSDB_SML_TIMESTAMP_MINUTES | Minutes | +| 4 | TSDB_SML_TIMESTAMP_SECONDS | Seconds | +| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | Milliseconds | +| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | Microseconds | +| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | Nanoseconds | -## Data schema mapping rules +In OpenTSDB file and JSON protocol modes, the precision of the timestamp is determined from its length in the standard OpenTSDB manner. User input is ignored. -This section describes how data for line protocols are mapped to data with a schema. The data measurement in each line protocol is mapped as follows: -- The tag name in tag_set is the name of the tag in the data schema -- The name in field_set is the column's name. +## Data Model Mapping -The following data is used as an example to illustrate the mapping rules. +This section describes how data in line protocol is mapped to a schema. The data measurement in each line is mapped to a +supertable name. The tag name in tag_set is the tag name in the schema, and the name in field_set is the column name in the schema. The following example shows how data is mapped: ```json st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 ``` -The row data mapping generates a super table: `st`, which contains three labels of type NCHAR: t1, t2, t3. Five data columns are ts (timestamp), c1 (bigint), c3 (binary), c2 (bool), c4 (bigint). The mapping becomes the following SQL statement. +This row is mapped to a supertable: `st` contains three NCHAR tags: t1, t2, and t3. Five columns are created: ts (timestamp), c1 (bigint), c3 (binary), c2 (bool), and c4 (bigint). The following SQL statement is generated: ```json create stable st (_ts timestamp, c1 bigint, c2 bool, c3 binary(6), c4 bigint) tags(t1 nchar(1), t2 nchar(1), t3 nchar(2)) ``` -## Data schema change handling +## Processing Schema Changes -This section describes the impact on the data schema for different line protocol data writing cases. +This section describes the impact on the schema caused by different data being written. -When writing to an explicitly identified field type using the line protocol, subsequent changes to the field's type definition will result in an explicit data schema error, i.e., will trigger a write API report error. As shown below, the +If you use line protocol to write to a specific tag field and then later change the field type, a schema error will ocur. This triggers an error on the write API. This is shown as follows: ```json -st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000 -st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4i 1626006833640000000 +st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000 +st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4i 1626006833640000000 ``` -The data type mapping in the first row defines column c4 as DOUBLE, but the data in the second row is declared as BIGINT by the numeric suffix, which triggers a parsing error with schemaless writing. +The first row defines c4 as a double. However, in the second row, the suffix indicates that the value of c4 is a bigint. This causes schemaless writing to throw an error. -If the line protocol before the column declares the data column as BINARY, the subsequent one requires a longer binary length, which triggers a super table schema change. +An error also occurs if data input into a binary column exceeds the defined length of the column. ```json -st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000 -st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000 +st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000 +st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000 ``` -The first line of the line protocol parsing will declare column c5 is a BINARY(4) field. The second line data write will parse column c5 as a BINARY column. But in the second line, c5's width is 6 so you need to increase the width of the BINARY field to be able to accommodate the new string. +The first row defines c5 as a binary(4). but the second row writes 6 bytes to it. This means that the length of the binary column must be expanded to contain the data. ```json -st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000 -st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000 +st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000 +st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000 ``` -The second line of data has an additional column c6 of type BINARY(6) compared to the first row. Then a column c6 of type BINARY(6) is automatically added at this point. +The preceding data includes a new entry, c6, with type binary(6). When this occurs, a new column c6 with type binary(6) is added automatically. -## Write integrity +## Write Integrity -TDengine provides idempotency guarantees for data writing, i.e., you can repeatedly call the API to write data with errors. However, it does not give atomicity guarantees for writing multiple rows of data. During the process of writing numerous rows of data in one batch, some data will be written successfully, and some data will fail. +TDengine guarantees the idempotency of data writes. This means that you can repeatedly call the API to perform write operations with bad data. However, TDengine does not guarantee the atomicity of multi-row writes. In a multi-row write, some data may be written successfully and other data unsuccessfully. -## Error code +##: Error Codes -If it is an error in the data itself during the schemaless writing process, the application will get `TSDB_CODE_TSC_LINE_SYNTAX_ERROR` error message, which indicates that the error occurred in writing. The other error codes are consistent with the TDengine and can be obtained via the `taos_errstr()` to get the specific cause of the error. +The TSDB_CODE_TSC_LINE_SYNTAX_ERROR indicates an error in the schemaless writing component. +This error occurs when writing text. For other errors, schemaless writing uses the standard TDengine error codes +found in taos_errstr. diff --git a/docs/en/14-reference/14-taosKeeper.md b/docs/en/14-reference/14-taosKeeper.md new file mode 100644 index 0000000000000000000000000000000000000000..665bc75380d4f59666d792d074fb37c65c810264 --- /dev/null +++ b/docs/en/14-reference/14-taosKeeper.md @@ -0,0 +1,143 @@ +--- +sidebar_label: taosKeeper +title: taosKeeper +description: exports TDengine monitoring metrics. +--- + +## Introduction + +taosKeeper is a tool for TDengine that exports monitoring metrics. With taosKeeper, you can easily monitor the operational status of your TDengine deployment. taosKeeper uses the TDengine REST API. It is not necessary to install TDengine Client to use taosKeeper. + +## Installation + + +Methods of installing taosKeeper: + + + +- You can compile taosKeeper separately and install it. Please refer to the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details. --> +You can compile taosKeeper separately and install it. Please refer to the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details. + +## Run + +### Configuration and running methods + +taosKeeper needs to be executed on the terminal of the operating system, it supports three configuration methods: [Command-line arguments](#command-line-arguments-in-detail), [environment variable](#environment-variable-in-detail) and [configuration file](#configuration-file-parameters-in-detail). The precedence of those is Command-line, environment variable and configuration file. + +**Make sure that the TDengine cluster is running correctly before running taosKeeper. ** Ensure that the monitoring service in TDengine has been started. For more information, see [TDengine Monitoring Configuration](../config/#monitoring). + +### Command-Line Parameters + +You can use command-line parameters to run taosKeeper and control its behavior: + +```shell +$ taosKeeper +``` +### Environment variable + +You can use Environment variable to run taosKeeper and control its behavior: + +```shell +$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3 + +$ taoskeeper +``` + +you can run `taoskeeper -h` for more detail. + +### Configuration File + +You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/keeper.toml` is used by default. If this file does not specify configurations, the default values are used. + +```shell +$ taoskeeper -c +``` + +**Sample configuration files** +```toml +# enable debug in gin framework +debug = false + +# listen to server port, default 6043 +port = 6043 + +# set log level to panic, error, info, debug, or trace +loglevel = "info" + +# set pool size +gopoolsize = 50000 + +# query rotation period for TDengine monitoring data +RotationInterval = "15s" + +[tdengine] +host = "127.0.0.1" +port = 6041 +username = "root" +password = "taosdata" + +# set taosAdapter to monitor +[taosAdapter] +address = ["127.0.0.1:6041","192.168.1.95:6041"] + +[metrics] +# monitoring metric prefix +prefix = "taos" + +# cluster data identifier +cluster = "production" + +# database to store monitoring data +database = "log" + +# standard tables to monitor +tables = ["normal_table"] +``` + +### Obtain Monitoring Metrics + +taosKeeper records monitoring metrics generated by TDengine in a specified database and provides an interface through which you can export the data. + +#### View Monitoring Results + +```shell +$ taos +# the log database is used in this example +> use log; +> select * from cluster_info limit 1; +``` + +Example result set: + +```shell + ts | first_ep | first_ep_dnode_id | version | master_uptime | monitor_interval | dbs_total | tbs_total | stbs_total | dnodes_total | dnodes_alive | mnodes_total | mnodes_alive | vgroups_total | vgroups_alive | vnodes_total | vnodes_alive | connections_total | protocol | cluster_id | +=============================================================================================================================================================================================================================================================================================================================================================================== + 2022-08-16 17:37:01.629 | hlb:6030 | 1 | 3.0.0.0 | 0.27250 | 15 | 2 | 27 | 38 | 1 | 1 | 1 | 1 | 4 | 4 | 4 | 4 | 14 | 1 | 5981392874047724755 | +Query OK, 1 rows in database (0.036162s) +``` + +#### Export Monitoring Metrics + +```shell +$ curl http://127.0.0.1:6043/metrics +``` + +Sample result set (excerpt): + +```shell +# HELP taos_cluster_info_connections_total +# TYPE taos_cluster_info_connections_total counter +taos_cluster_info_connections_total{cluster_id="5981392874047724755"} 16 +# HELP taos_cluster_info_dbs_total +# TYPE taos_cluster_info_dbs_total counter +taos_cluster_info_dbs_total{cluster_id="5981392874047724755"} 2 +# HELP taos_cluster_info_dnodes_alive +# TYPE taos_cluster_info_dnodes_alive counter +taos_cluster_info_dnodes_alive{cluster_id="5981392874047724755"} 1 +# HELP taos_cluster_info_dnodes_total +# TYPE taos_cluster_info_dnodes_total counter +taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1 +# HELP taos_cluster_info_first_ep +# TYPE taos_cluster_info_first_ep gauge +taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1 +``` \ No newline at end of file diff --git a/docs/en/14-reference/_category_.yml b/docs/en/14-reference/_category_.yml index 5f5466532be79153d42da0907df6336439593601..1fb42e60a7c2872dbf9f66096ea9a38c8aa4a295 100644 --- a/docs/en/14-reference/_category_.yml +++ b/docs/en/14-reference/_category_.yml @@ -1 +1 @@ -label: Reference +label: Reference \ No newline at end of file diff --git a/docs/en/14-reference/index.md b/docs/en/14-reference/index.md index f350eebfc1a1ca2feaedc18c4b4fa798742e31b4..f3a64913d065d1d8e321ce7433c9d605ef70bd13 100644 --- a/docs/en/14-reference/index.md +++ b/docs/en/14-reference/index.md @@ -2,11 +2,11 @@ title: Reference --- -The reference guide is a detailed introduction to TDengine including various TDengine connectors in different languages, and the tools that come with TDengine. +This section describes the TDengine connectors and utilities. ```mdx-code-block import DocCardList from '@theme/DocCardList'; import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; -``` +``` \ No newline at end of file diff --git a/docs/en/20-third-party/01-grafana.mdx b/docs/en/20-third-party/01-grafana.mdx index 5dbeb31a231464e48b4f977420f03f0ede81e78e..e0fbefd5a8634d2001f2cc0601afa110aff33632 100644 --- a/docs/en/20-third-party/01-grafana.mdx +++ b/docs/en/20-third-party/01-grafana.mdx @@ -6,9 +6,7 @@ title: Grafana import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a dashboard. - -You can learn more about using the TDengine plugin on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md). +TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a dashboard. You can learn more about using the TDengine plugin on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md). ## Prerequisites @@ -65,7 +63,6 @@ Restart Grafana service and open Grafana in web-browser, usually - Follow the installation steps in [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) with the [``grafana-cli`` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) for plugin installation. @@ -76,7 +73,7 @@ grafana-cli plugins install tdengine-datasource sudo -u grafana grafana-cli plugins install tdengine-datasource ``` -Alternatively, you can manually download the .zip file from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and unpack it into your grafana plugins directory. +You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows: ```bash GF_VERSION=3.2.2 @@ -131,7 +128,7 @@ docker run -d \ grafana/grafana ``` -You can setup a zero-configuration stack for TDengine + Grafana by [docker-compose](https://docs.docker.com/compose/) and [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) file: +You can setup a zero-configuration stack for TDengine + Grafana by [docker-compose](https://docs.docker.com/compose/) and [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) file: 1. Save the provisioning configuration file to `tdengine.yml`. @@ -196,7 +193,7 @@ Go back to the main interface to create a dashboard and click Add Query to enter As shown above, select the `TDengine` data source in the `Query` and enter the corresponding SQL in the query box below for query. -- INPUT SQL: enter the statement to be queried (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where, from, to and interval are built-in variables of the TDengine plugin, indicating the range and time interval of queries fetched from the Grafana plugin panel. In addition to the built-in variables, custom template variables are also supported. +- INPUT SQL: Enter the desired query (the results being two columns and multiple rows), such as `select _wstart, avg(mem_system) from log.dnodes_info where ts >= $from and ts < $to interval($interval)`. In this statement, $from, $to, and $interval are variables that Grafana replaces with the query time range and interval. In addition to the built-in variables, custom template variables are also supported. - ALIAS BY: This allows you to set the current query alias. - GENERATE SQL: Clicking this button will automatically replace the corresponding variables and generate the final executed statement. @@ -208,7 +205,11 @@ Follow the default prompt to query the average system memory usage for the speci ### Importing the Dashboard -You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. The dashboard is published in Grafana as [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for the details. +You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. Ensure that you use TDinsight for 3.x. + +![TDengine Database Grafana plugine import dashboard](./import_dashboard.webp) + +A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) 。 Check the [TDinsight User Manual](/reference/tdinsight/) for the details. For more dashboards using TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a sub list: diff --git a/docs/en/20-third-party/06-statsd.md b/docs/en/20-third-party/06-statsd.md index 40e927b9fd1d2eca9d454a987ac51d533eb75005..32b1bbb97acafd2494c7fadb8af3d06cf69219ea 100644 --- a/docs/en/20-third-party/06-statsd.md +++ b/docs/en/20-third-party/06-statsd.md @@ -1,6 +1,6 @@ --- sidebar_label: StatsD -title: StatsD writing +title: StatsD Writing --- import StatsD from "../14-reference/_statsd.mdx" @@ -12,8 +12,8 @@ You can write StatsD data to TDengine by simply modifying the configuration file ## Prerequisites To write StatsD data to TDengine requires the following preparations. -- The TDengine cluster has been deployed and is working properly -- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details. +1. The TDengine cluster is deployed and functioning properly +2. taosAdapter is installed and running properly. Please refer to the taosAdapter manual for details. - StatsD has been installed. To install StatsD, please refer to [official documentation](https://github.com/statsd/statsd) ## Configuration steps @@ -39,9 +39,6 @@ $ echo "foo:1|c" | nc -u -w0 127.0.0.1 8125 Use the TDengine CLI to verify that StatsD data is written to TDengine and can read out correctly. ``` -Welcome to the TDengine shell from Linux, Client Version:2.4.0.0 -Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. - taos> show databases; name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status | ==================================================================================================================================================================================================================================================================================== diff --git a/docs/en/20-third-party/09-emq-broker.md b/docs/en/20-third-party/09-emq-broker.md index 0900dd3d7571dc0ab8d93174aa2d7b5eccf1fbf5..2ead1bbaf40f06fec2a5cbf85e46fdfdcc5216df 100644 --- a/docs/en/20-third-party/09-emq-broker.md +++ b/docs/en/20-third-party/09-emq-broker.md @@ -9,7 +9,7 @@ MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emq The following preparations are required for EMQX to add TDengine data sources correctly. - The TDengine cluster is deployed and working properly -- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details. +- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details. - If you use the emulated writers described later, you need to install the appropriate version of Node.js. V12 is recommended. ## Install and start EMQX @@ -28,8 +28,6 @@ USE test; CREATE TABLE sensor_data (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, volume FLOAT, pm10 FLOAT, pm25 FLOAT, so2 FLOAT, no2 FLOAT, co FLOAT, sensor_id NCHAR(255), area TINYINT, coll_time TIMESTAMP); ``` -Note: The table schema is based on the blog [(In Chinese) Data Transfer, Storage, Presentation, EMQX + TDengine Build MQTT IoT Data Visualization Platform](https://www.taosdata.com/blog/2020/08/04/1722.html) as an example. Subsequent operations are carried out with this blog scenario too. Please modify it according to your actual application scenario. - ## Configuring EMQX Rules Since the configuration interface of EMQX differs from version to version, here is v4.4.5 as an example. For other versions, please refer to the corresponding official documentation. @@ -137,5 +135,5 @@ Use the TDengine CLI program to log in and query the appropriate databases and t ![TDengine Database EMQX result in taos](./emqx/check-result-in-taos.webp) -Please refer to the [TDengine official documentation](https://docs.taosdata.com/) for more details on how to use TDengine. +Please refer to the [TDengine official documentation](https://docs.tdengine.com/) for more details on how to use TDengine. EMQX Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use EMQX. diff --git a/docs/en/20-third-party/10-hive-mq-broker.md b/docs/en/20-third-party/10-hive-mq-broker.md index 333e00fa0e9b724ffbb067a83ad07d0b846b1a23..828a62ac5b336766d5c3770cc42cd3a61cfd8d5d 100644 --- a/docs/en/20-third-party/10-hive-mq-broker.md +++ b/docs/en/20-third-party/10-hive-mq-broker.md @@ -1,6 +1,6 @@ --- sidebar_label: HiveMQ Broker -title: HiveMQ Broker writing +title: HiveMQ Broker Writing --- -[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides community and enterprise editions. HiveMQ is mainly for enterprise emerging machine-to-machine M2M communication and internal transport, meeting scalability, ease of management, and security features. HiveMQ provides an open-source plug-in development kit. MQTT data can be saved to TDengine via TDengine extension for HiveMQ. Please refer to the [HiveMQ extension - TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README_EN.md) for details on how to use it. \ No newline at end of file +[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides community and enterprise editions. HiveMQ is mainly for enterprise emerging machine-to-machine M2M communication and internal transport, meeting scalability, ease of management, and security features. HiveMQ provides an open-source plug-in development kit. MQTT data can be saved to TDengine via TDengine extension for HiveMQ. For more information, see [HiveMQ TDengine Extension](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README_EN.md). diff --git a/docs/en/20-third-party/12-google-data-studio.md b/docs/en/20-third-party/12-google-data-studio.md new file mode 100644 index 0000000000000000000000000000000000000000..fc94f98056bbeeeec88ca7ea12a4a6a7e6f15dc5 --- /dev/null +++ b/docs/en/20-third-party/12-google-data-studio.md @@ -0,0 +1,36 @@ +--- +sidebar_label: Google Data Studio +title: Use Google Data Studio to access TDengine +--- + +Data Studio is a powerful tool for reporting and visualization, offering a wide variety of charts and connectors and making it easy to generate reports based on predefined templates. Its ease of use and robust ecosystem have made it one of the first choices for people working in data analysis. + +TDengine is a high-performance, scalable time-series database that supports SQL. Many businesses and developers in fields spanning from IoT and Industry Internet to IT and finance are using TDengine as their time-series database management solution. + +The TDengine team immediately saw the benefits of using TDengine to process time-series data with Data Studio to analyze it, and they got to work to create a connector for Data Studio. + +With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for “TDengine”. + +![02](gds/gds-02.png.webp) + +Select the TDengine connector and click Authorize. + +![03](gds/gds-03.png.webp) + +Then sign in to your Google Account and click Allow to enable the connection to TDengine. + +![04](gds/gds-04.png.webp) + +In the Enter URL field, type the hostname and port of the server running the TDengine REST service. In the following fields, type your username, password, database name, table name, and the start and end times of your query range. Then, click Connect. + +![05](gds/gds-05.png.webp) + +After the connection is established, you can use Data Studio to process your data and create reports. + +![06](gds/gds-06.png.webp) + +In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data – some examples are shown below. + +![07](gds/gds-07.png.webp) + +With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we’re sure you’ll be able to gain new insights and obtain even more value from your data. diff --git a/docs/en/20-third-party/emqx/client-num.webp b/docs/en/20-third-party/emqx/client-num.webp index a151b184843607d67b649babb3145bfb3e329cda..a8ac6fb4c036b1db697d537472732ca854e583c8 100644 Binary files a/docs/en/20-third-party/emqx/client-num.webp and b/docs/en/20-third-party/emqx/client-num.webp differ diff --git a/docs/en/20-third-party/gds/gds-01.webp b/docs/en/20-third-party/gds/gds-01.webp new file mode 100644 index 0000000000000000000000000000000000000000..2e5f9e4ff5db1e37718e2397c9a13a9f0e05602d Binary files /dev/null and b/docs/en/20-third-party/gds/gds-01.webp differ diff --git a/docs/en/20-third-party/gds/gds-02.png.webp b/docs/en/20-third-party/gds/gds-02.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..3b3537f5a488019482f94452e70bd1bd79867ab5 Binary files /dev/null and b/docs/en/20-third-party/gds/gds-02.png.webp differ diff --git a/docs/en/20-third-party/gds/gds-03.png.webp b/docs/en/20-third-party/gds/gds-03.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..5719436d5b2f21aa861067b966511e4b34d17dce Binary files /dev/null and b/docs/en/20-third-party/gds/gds-03.png.webp differ diff --git a/docs/en/20-third-party/gds/gds-04.png.webp b/docs/en/20-third-party/gds/gds-04.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..ddaae5c1a63b6b4db692e12491df55b88dcaadee Binary files /dev/null and b/docs/en/20-third-party/gds/gds-04.png.webp differ diff --git a/docs/en/20-third-party/gds/gds-05.png.webp b/docs/en/20-third-party/gds/gds-05.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..9a917678fc7e60f0a739fa1e2b0f4fa010d12708 Binary files /dev/null and b/docs/en/20-third-party/gds/gds-05.png.webp differ diff --git a/docs/en/20-third-party/gds/gds-06.png.webp b/docs/en/20-third-party/gds/gds-06.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..c76b68d32b5907bd5ba4e4010456f2ca5303448f Binary files /dev/null and b/docs/en/20-third-party/gds/gds-06.png.webp differ diff --git a/docs/en/20-third-party/gds/gds-07.png.webp b/docs/en/20-third-party/gds/gds-07.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..1386ae9c4db4f2465dd071afc5a047658b47031c Binary files /dev/null and b/docs/en/20-third-party/gds/gds-07.png.webp differ diff --git a/docs/en/20-third-party/gds/gds-08.png.webp b/docs/en/20-third-party/gds/gds-08.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..59dcf8b31df8bde8d4073ee0c7b1c7bdd7bd439d Binary files /dev/null and b/docs/en/20-third-party/gds/gds-08.png.webp differ diff --git a/docs/en/20-third-party/gds/gds-09.png.webp b/docs/en/20-third-party/gds/gds-09.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..b94439f211a814f66d41231c9386c57f3ffe8322 Binary files /dev/null and b/docs/en/20-third-party/gds/gds-09.png.webp differ diff --git a/docs/en/20-third-party/gds/gds-10.png.webp b/docs/en/20-third-party/gds/gds-10.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..a63cad9e9a3d412b1132359506530498fb1a0e57 Binary files /dev/null and b/docs/en/20-third-party/gds/gds-10.png.webp differ diff --git a/docs/en/20-third-party/gds/gds-11.png.webp b/docs/en/20-third-party/gds/gds-11.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..fc38cd9a29c00afa48238741c33b439f737a7b8f Binary files /dev/null and b/docs/en/20-third-party/gds/gds-11.png.webp differ diff --git a/docs/en/20-third-party/import_dashboard.webp b/docs/en/20-third-party/import_dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..164e3f4690a5a55f937a3c29e1e8ca026648e6b1 Binary files /dev/null and b/docs/en/20-third-party/import_dashboard.webp differ diff --git a/docs/en/20-third-party/import_dashboard1.webp b/docs/en/20-third-party/import_dashboard1.webp new file mode 100644 index 0000000000000000000000000000000000000000..d4fb374ce8bb75c8a0fbdbb9cab5b30eb29ab06d Binary files /dev/null and b/docs/en/20-third-party/import_dashboard1.webp differ diff --git a/docs/en/20-third-party/import_dashboard2.webp b/docs/en/20-third-party/import_dashboard2.webp new file mode 100644 index 0000000000000000000000000000000000000000..9f74dc96be20ab64b5fb555aaccdaa1c1139b35c Binary files /dev/null and b/docs/en/20-third-party/import_dashboard2.webp differ diff --git a/docs/en/21-tdinternal/01-arch.md b/docs/en/21-tdinternal/01-arch.md index 44651c0496481c410640e577aaad5781f846e302..2f876adffc2543bb9f117e5812ccc5241d7a6d99 100644 --- a/docs/en/21-tdinternal/01-arch.md +++ b/docs/en/21-tdinternal/01-arch.md @@ -12,6 +12,7 @@ The design of TDengine is based on the assumption that any hardware or software Logical structure diagram of TDengine's distributed architecture is as follows: ![TDengine Database architecture diagram](structure.webp) +
    Figure 1: TDengine architecture diagram
    A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDengine client driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit. @@ -38,15 +39,16 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc **Cluster external connection**: TDengine cluster can accommodate a single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter “serverPort” of TDengine will be adopted. -**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode: +**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode: -1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step; +1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step; 2. Check the system configuration file taos.cfg to obtain node configuration parameters “firstEp” and “secondEp” (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist or do not exist in taos.cfg, or are invalid, skip to the third step; 3. Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connection. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again. **The choice of MNODE**: TDengine logically has a management node, but there is no separate execution code. The server-side only has one set of execution code, taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, totally transparently and without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage. -**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster. +**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster. + - Step1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode" - Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step. @@ -57,6 +59,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process. ![typical process of TDengine Database](message.webp) +
    Figure 2: Typical process of TDengine
    1. Application initiates a request to insert data through JDBC, ODBC, or other APIs. @@ -121,16 +124,17 @@ The load balancing process does not require any manual intervention, and it is t If a database has N replicas, a virtual node group has N virtual nodes. But only one is the Leader and all others are slaves. When the application writes a new record to system, only the Leader vnode can accept the writing request. If a follower vnode receives a writing request, the system will notifies TAOSC to redirect. -### Leader vnode Writing Process +### Leader vnode Writing Process Leader Vnode uses a writing process as follows: ![TDengine Database Leader Writing Process](write_master.webp) +
    Figure 3: TDengine Leader writing process
    1. Leader vnode receives the application data insertion request, verifies, and moves to next step; 2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file; -3. If there are multiple replicas, vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data; +3. If there are multiple replicas, vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data; 4. Write into memory and add the record to “skip list”; 5. Leader vnode returns a confirmation message to the application, indicating a successful write. 6. If any of Step 2, 3 or 4 fails, the error will directly return to the application. @@ -140,6 +144,7 @@ Leader Vnode uses a writing process as follows: For a follower vnode, the write process as follows: ![TDengine Database Follower Writing Process](write_slave.webp) +
    Figure 4: TDengine Follower Writing Process
    1. Follower vnode receives a data insertion request forwarded by Leader vnode; @@ -212,6 +217,7 @@ When data is written to disk, the system decideswhether to compress the data bas By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”. dataDir format is as follows: + ``` dataDir data_path [tier_level] ``` @@ -270,6 +276,7 @@ For the data collected by device D1001, the number of records per hour is counte TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable (super table). STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tag. There can be multiple tags which can be added, deleted and modified at any time. Applications can aggregate or statistically operate on all or a subset of tables under a STABLE by specifying tag filters. This greatly simplifies the development of applications. The process is shown in the following figure: ![TDengine Database Diagram of multi-table aggregation query](multi_tables.webp) +
    Figure 5: Diagram of multi-table aggregation query
    1. Application sends a query condition to system; @@ -279,9 +286,8 @@ TDengine creates a separate table for each data collection point, but in practic 5. Each vnode first finds the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns result to TAOSC; 6. TAOSC finally aggregates the results returned by multiple data nodes and send them back to application. -Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details. +Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TDengine SQL for details. ### Precomputation In order to effectively improve the performance of query processing, based-on the unchangeable feature of IoT data, statistical information of data stored in data block is recorded in the head of data block, including max value, min value, and sum. We call it a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are directly used, and no need to read the data block contents at all. Since the amount of pre-calculated data is much smaller than the actual size of data block stored on disk, for query processing with disk IO as bottleneck, the use of pre-calculated results can greatly reduce the pressure of reading IO and accelerate the query process. The precomputation mechanism is similar to the BRIN (Block Range Index) of PostgreSQL. - diff --git a/docs/en/10-cluster/03-high-availability.md b/docs/en/21-tdinternal/03-high-availability.md similarity index 100% rename from docs/en/10-cluster/03-high-availability.md rename to docs/en/21-tdinternal/03-high-availability.md diff --git a/docs/en/10-cluster/04-load-balance.md b/docs/en/21-tdinternal/04-load-balance.md similarity index 100% rename from docs/en/10-cluster/04-load-balance.md rename to docs/en/21-tdinternal/04-load-balance.md diff --git a/docs/en/25-application/01-telegraf.md b/docs/en/25-application/01-telegraf.md index d30a23fe1b942e1411e8b5f1320e1c54ae2b407f..59491152bcda3e26ec12aaa59ac1041ef23c4e7e 100644 --- a/docs/en/25-application/01-telegraf.md +++ b/docs/en/25-application/01-telegraf.md @@ -34,7 +34,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa ### TDengine -Download the latest TDengine-server 2.4.0.x or above from the [Downloads](http://taosdata.com/cn/all-downloads/) page on the TAOSData website and install it. +Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it. ## Data Connection Setup @@ -79,5 +79,5 @@ Click on the plus icon on the left and select `Import` to get the data from `htt ## Wrap-up -The above demonstrates how to quickly build a IT DevOps visualization system. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system in just a few minutes. +The above demonstrates how to quickly build a IT DevOps visualization system. Thanks to the schemaless protocol parsing feature in TDengine and ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system in just a few minutes. Please refer to the official documentation and product implementation cases for other features. diff --git a/docs/en/25-application/02-collectd.md b/docs/en/25-application/02-collectd.md index 1733ed1b1af8c9375c3773d1ca86831396499a78..692cd8d929f04a03e4433bd0b71f84101bc362cb 100644 --- a/docs/en/25-application/02-collectd.md +++ b/docs/en/25-application/02-collectd.md @@ -37,7 +37,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa ### Install TDengine -Download the latest TDengine-server 2.4.0.x or above from the [Downloads](http://taosdata.com/cn/all-downloads/) page on the TAOSData website and install it. +Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it. ## Data Connection Setup @@ -99,6 +99,6 @@ Download the dashboard json from `https://github.com/taosdata/grafanaplugin/blob ## Wrap-up -TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system, or adapt an existing system, in just a few minutes. +TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. Thanks to the new schemaless protocol parsing feature in TDengine and ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system, or adapt an existing system, in just a few minutes. For TDengine's powerful data writing and querying performance and other features, please refer to the official documentation and successful product implementation cases. diff --git a/docs/en/25-application/03-immigrate.md b/docs/en/25-application/03-immigrate.md index fe67f973894d460fb017de0e1a2099b8441a4abe..1aabaa43e77660d72bca00d7d59cdee69b1a7c92 100644 --- a/docs/en/25-application/03-immigrate.md +++ b/docs/en/25-application/03-immigrate.md @@ -41,7 +41,7 @@ The agents deployed in the application nodes are responsible for providing opera - **TDengine installation and deployment** -First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to the blog ["Installation and Uninstallation of TDengine Multiple Installation Packages"](https://www.taosdata.com/blog/2019/08/09/566.html). +First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to [Install TDengine](../../get-started/package) Note that once the installation is complete, do not start the `taosd` service before properly configuring the parameters. @@ -51,7 +51,7 @@ TDengine version 2.4 and later version includes `taosAdapter`. taosAdapter is a Users can flexibly deploy taosAdapter instances, based on their requirements, to improve data writing throughput and provide guarantees for data writes in different application scenarios. -Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](/reference/taosadapter/). +Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](../../reference/taosadapter/). If using collectd, modify the configuration file in its default location `/etc/collectd/collectd.conf` to point to the IP address and port of the node where to deploy taosAdapter. For example, assuming the taosAdapter IP address is 192.168.1.130 and port 6046, configure it as follows. @@ -411,7 +411,7 @@ TDengine provides a wealth of help documents to explain many aspects of cluster ### Cluster Deployment -The first is TDengine installation. Download the latest stable version of TDengine from the official website, and install it. Please refer to the blog ["Installation and Uninstallation of Various Installation Packages of TDengine"](https://www.taosdata.com/blog/2019/08/09/566.html) for the various installation package formats. +The first is TDengine installation. Download the latest stable version of TDengine from the official website, and install it. Please refer to [Install TDengine](../../get-started/package) for more details. Note that once the installation is complete, do not immediately start the `taosd` service, but start it after correctly configuring the parameters. @@ -419,11 +419,11 @@ Note that once the installation is complete, do not immediately start the `taosd To ensure that the system can obtain the necessary information for regular operation. Please set the following vital parameters correctly on the server: -FQDN, firstEp, secondEP, dataDir, logDir, tmpDir, serverPort. For the specific meaning and setting requirements of each parameter, please refer to the document "[TDengine Cluster Installation and Management](/cluster/)" +FQDN, firstEp, secondEP, dataDir, logDir, tmpDir, serverPort. For the specific meaning and setting requirements of each parameter, please refer to the document "[TDengine Cluster Deployment](../../deployment)" Follow the same steps to set parameters on the other nodes, start the taosd service, and then add Dnodes to the cluster. -Finally, start `taos` and execute the `show dnodes` command. If you can see all the nodes that have joined the cluster, the cluster building process was successfully completed. For specific operation procedures and precautions, please refer to the document "[TDengine Cluster Installation and Management](/cluster/)". +Finally, start `taos` and execute the `show dnodes` command. If you can see all the nodes that have joined the cluster, the cluster building process was successfully completed. For specific operation procedures and precautions, please refer to the document "[TDengine Cluster Deployment](../../deployment)". ## Appendix 4: Super Table Names @@ -431,5 +431,5 @@ Since OpenTSDB's metric name has a dot (".") in it, for example, a metric with a ## Appendix 5: Reference Articles -1. [Using TDengine + collectd/StatsD + Grafana to quickly build an IT operation and maintenance monitoring system](/application/collectd/) -2. [Write collected data directly to TDengine through collectd](/third-party/collectd/) +1. [Using TDengine + collectd/StatsD + Grafana to quickly build an IT operation and maintenance monitoring system](../collectd/) +2. [Write collected data directly to TDengine through collectd](../collectd/) diff --git a/docs/en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp b/docs/en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp index 147a65b17bff2aa0e44faa206618bdce5664e1ca..a8b52f3b2d6efe2b83c087d81b98c89fa941f263 100644 Binary files a/docs/en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp and b/docs/en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp differ diff --git a/docs/en/25-application/IT-DevOps-Solutions-Telegraf.webp b/docs/en/25-application/IT-DevOps-Solutions-Telegraf.webp index fd5461ec9b37be66cac4c17fb1f81fec76158330..fac96f4eb63232c405bfb5e09eda7ed00901a7b5 100644 Binary files a/docs/en/25-application/IT-DevOps-Solutions-Telegraf.webp and b/docs/en/25-application/IT-DevOps-Solutions-Telegraf.webp differ diff --git a/docs/en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp b/docs/en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp index 105afcdb8312b23675f62ff6339d5e737b5cd958..fac96f4eb63232c405bfb5e09eda7ed00901a7b5 100644 Binary files a/docs/en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp and b/docs/en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp differ diff --git a/docs/en/27-train-faq/01-faq.md b/docs/en/27-train-faq/01-faq.md index c10bca1d05edd8cebe451901b3abb91923618a26..733b4184741ec3bdcea5ae5ef4b236493a03be35 100644 --- a/docs/en/27-train-faq/01-faq.md +++ b/docs/en/27-train-faq/01-faq.md @@ -1,114 +1,163 @@ --- -sidebar_label: FAQ title: Frequently Asked Questions --- ## Submit an Issue -If the tips in FAQ don't help much, please submit an issue on [GitHub](https://github.com/taosdata/TDengine) to describe your problem. In your description please include the TDengine version, hardware and OS information, the steps to reproduce the problem and any other relevant information. It would be very helpful if you can package the contents in `/var/log/taos` and `/etc/taos` and upload. These two are the default directories used by TDengine. If you have changed the default directories in your configuration, please package the files in your configured directories. We recommended setting `debugFlag` to 135 in `taos.cfg`, restarting `taosd`, then reproducing the problem and collecting the logs. If you don't want to restart, an alternative way of setting `debugFlag` is executing `alter dnode debugFlag 135` command in TDengine CLI `taos`. During normal running, however, please make sure `debugFlag` is set to 131. +If your issue could not be resolved by reviewing this documentation, you can submit your issue on GitHub and receive support from the TDengine Team. When you submit an issue, attach the following directories from your TDengine deployment: -## Frequently Asked Questions +1. The directory containing TDengine logs (`/var/log/taos` by default) +2. The directory containing TDengine configuration files (`/etc/taos` by default) -### 1. How to upgrade to TDengine 2.0 from older version? +In your GitHub issue, provide the version of TDengine and the operating system and environment for your deployment, the operations that you performed when the issue occurred, and the time of occurrence and affected tables. -version 2.x is not compatible with version 1.x. With regard to the configuration and data files, please perform the following steps before upgrading. Please follow data integrity, security, backup and other relevant SOPs, best practices before removing/deleting any data. +To obtain more debugging information, open `taos.cfg` and set the `debugFlag` parameter to `135`. Then restart TDengine Server and reproduce the issue. The debug-level logs generated help the TDengine Team to resolve your issue. If it is not possible to restart TDengine Server, you can run the following command in the TDengine CLI to set the debug flag: -1. Delete configuration files: `sudo rm -rf /etc/taos/taos.cfg` -2. Delete log files: `sudo rm -rf /var/log/taos/` -3. Delete data files if the data doesn't need to be kept: `sudo rm -rf /var/lib/taos/` -4. Install latest 2.x version -5. If the data needs to be kept and migrated to newer version, please contact professional service at TDengine for assistance. +``` + alter dnode 'debugFlag' '135'; +``` -### 2. How to handle "Unable to establish connection"? +You can run the `SHOW DNODES` command to determine the dnode ID. -When the client is unable to connect to the server, you can try the following ways to troubleshoot and resolve the problem. +When debugging information is no longer needed, set `debugFlag` to 131. -1. Check the network +## Frequently Asked Questions - - Check if the hosts where the client and server are running are accessible to each other, for example by `ping` command. - - Check if the TCP/UDP on port 6030-6042 are open for access if firewall is enabled. If possible, disable the firewall for diagnostics, but please ensure that you are following security and other relevant protocols. - - Check if the FQDN and serverPort are configured correctly in `taos.cfg` used by the server side. - - Check if the `firstEp` is set properly in the `taos.cfg` used by the client side. +### 1. What are the best practices for upgrading a previous version of TDengine to version 3.0? -2. Make sure the client version and server version are same. +TDengine 3.0 is not compatible with the configuration and data files from previous versions. Before upgrading, perform the following steps: -3. On server side, check the running status of `taosd` by executing `systemctl status taosd` . If your server is started using another way instead of `systemctl`, use the proper method to check whether the server process is running normally. +1. Run `sudo rm -rf /etc/taos/taos.cfg` to delete your configuration file. +2. Run `sudo rm -rf /var/log/taos/` to delete your log files. +3. Run `sudo rm -rf /var/lib/taos/` to delete your data files. +4. Install TDengine 3.0. +5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support). -4. If using connector of Python, Java, Go, Rust, C#, node.JS on Linux to connect to the server, please make sure `libtaos.so` is in directory `/usr/local/taos/driver` and `/usr/local/taos/driver` is in system lib search environment variable `LD_LIBRARY_PATH`. +### 4. How can I resolve the "Unable to establish connection" error? -5. If using connector on Windows, please make sure `C:\TDengine\driver\taos.dll` is in your system lib search path. We recommend putting `taos.dll` under `C:\Windows\System32`. +This error indicates that the client could not connect to the server. Perform the following troubleshooting steps: -6. Some advanced network diagnostics tools +1. Check the network. - - On Linux system tool `nc` can be used to check whether the TCP/UDP can be accessible on a specified port - Check whether a UDP port is open: `nc -vuz {hostIP} {port} ` - Check whether a TCP port on server side is open: `nc -l {port}` - Check whether a TCP port on client side is open: `nc {hostIP} {port}` + - For machines deployed in the cloud, verify that your security group can access ports 6030 and 6031 (TCP and UDP). + - For virtual machines deployed locally, verify that the hosts where the client and server are running are accessible to each other. Do not use localhost as the hostname. + - For machines deployed on a corporate network, verify that your NAT configuration allows the server to respond to the client. - - On Windows system `Test-NetConnection -ComputerName {fqdn} -Port {port}` on PowerShell can be used to check whether the port on server side is open for access. +2. Verify that the client and server are running the same version of TDengine. -7. TDengine CLI `taos` can also be used to check network, please refer to [TDengine CLI](/reference/taos-shell). +3. On the server, run `systemctl status taosd` to verify that taosd is running normally. If taosd is stopped, run `systemctl start taosd`. -### 3. How to handle "Unexpected generic error in RPC" or "Unable to resolve FQDN" ? +4. Verify that the client is configured with the correct FQDN for the server. -This error is caused because the FQDN can't be resolved. Please try following ways: +5. If the server cannot be reached with the `ping` command, verify that network and DNS or hosts file settings are correct. For a TDengine cluster, the client must be able to ping the FQDN of every node in the cluster. -1. Check whether the FQDN is configured properly on the server side -2. If DSN server is configured in the network, please check whether it works; otherwise, check `/etc/hosts` to see whether the FQDN is configured with correct IP -3. If the network configuration on the server side is OK, try to ping the server from the client side. -4. If TDengine has been used before with an old hostname then the hostname has been changed, please check `/var/lib/taos/taos/dnode/dnodeEps.json`. Before setting up a new TDengine cluster, it's better to cleanup the directories configured. +6. Verify that your firewall settings allow all hosts in the cluster to communicate on ports 6030 and 6041 (TCP and UDP). You can run `ufw status` (Ubuntu) or `firewall-cmd --list-port` (CentOS) to check the configuration. -### 4. "Invalid SQL" is returned even though the Syntax is correct +7. If you are using the Python, Java, Go, Rust, C#, or Node.js connector on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable. -"Invalid SQL" is returned when the length of SQL statement exceeds maximum allowed length or the syntax is not correct. +8. If you are using Windows, verify that `C:\TDengine\driver\taos.dll` is in the `PATH` environment variable. If possible, move `taos.dll` to the `C:\Windows\System32` directory. -### 5. Whether validation queries are supported? +9. On Linux systems, you can use the `nc` tool to check whether a port is accessible: + - To check whether a UDP port is open, run `nc -vuz {hostIP} {port}`. + - To check whether a TCP port on the server side is open, run `nc -l {port}`. + - To check whether a TCP port on client side is open, run `nc {hostIP} {port}`. -It's suggested to use a builtin database named as `log` to monitor. +10. On Windows systems, you can run `Test-NetConnection -ComputerName {fqdn} -Port {port}` in PowerShell to check whether a port on the server side is accessible. - +11. You can also use the TDengine CLI to diagnose network issues. For more information, see [Problem Diagnostics](https://docs.tdengine.com/operation/diagnose/). -### 6. Can I delete a record? +### 5. How can I resolve the "Unable to resolve FQDN" error? -From version 2.6.0.0 Enterprise version, deleting data can be supported. +Clients and dnodes must be able to resolve the FQDN of each required node. You can confirm your configuration as follows: -### 7. How to create a table of over 1024 columns? +1. Verify that the FQDN is configured properly on the server. +2. If your network has a DNS server, verify that it is operational. +3. If your network does not have a DNS server, verify that the FQDNs in the `hosts` file are correct. +4. On the client, use the `ping` command to test your connection to the server. If you cannot ping an FQDN, TDengine cannot reach it. +5. If TDengine has been previously installed and the `hostname` was modified, open `dnode.json` in the `data` folder and verify that the endpoint configuration is correct. The default location of the dnode file is `/var/lib/taos/dnode`. Ensure that you clean up previous installations before reinstalling TDengine. +6. Confirm whether FQDNs are preconfigured in `/etc/hosts` and `/etc/hostname`. -From version 2.1.7.0, at most 4096 columns can be defined for a table. +### 6. What is the most effective way to write data to TDengine? -### 8. How to improve the efficiency of inserting data? +Writing data in batches provides higher efficiency in most situations. You can insert one or more data records into one or more tables in a single SQL statement. -Inserting data in batch is a good practice. Single SQL statement can insert data for one or multiple tables in batch. +### 9. Why are table names not fully displayed? -### 9. JDBC Error: the executed SQL is not a DML or a DDL? +The number of columns in the TDengine CLI terminal display is limited. This can cause table names to be cut off, and if you use an incomplete name in a statement, the "Table does not exist" error will occur. You can increase the display size with the `maxBinaryDisplayWidth` parameter or the SQL statement `set max_binary_display_width`. You can also append `\G` to your SQL statement to bypass this limitation. -Please upgrade to latest JDBC driver, for details please refer to [Java Connector](/reference/connector/java) +### 10. How can I migrate data? -### 10. Failed to connect with error "invalid timestamp" +In TDengine, the `hostname` uniquely identifies a machine. When you move data files to a new machine, you must configure the new machine to have the same `host name` as the original machine. -The most common reason is that the time setting is not aligned on the client side and the server side. On Linux system, please use `ntpdate` command. On Windows system, please enable automatic sync in system time setting. +:::note -### 11. Table name is not shown in full +The data structure of previous versions of TDengine is not compatible with version 3.0. To migrate from TDengine 1.x or 2.x to 3.0, you must export data from your older deployment and import it back into TDengine 3.0. -There is a display width setting in TDengine CLI `taos`. It can be controlled by configuration parameter `maxBinaryDisplayWidth`, or can be set using SQL command `set max_binary_display_width`. A more convenient way is to append `\G` in a SQL command to bypass this limitation. +::: -### 12. How to change log level temporarily? +### 11. How can I temporary change the log level from the TDengine Client? -Below SQL command can be used to adjust log level temporarily +To change the log level for debugging purposes, you can use the following command: ```sql -ALTER LOCAL flag_name flag_value; +ALTER LOCAL local_option + +local_option: { + 'resetLog' + | 'rpcDebugFlag' value + | 'tmrDebugFlag' value + | 'cDebugFlag' value + | 'uDebugFlag' value + | 'debugFlag' value +} ``` - - flag_name can be: debugFlag,cDebugFlag,tmrDebugFlag,uDebugFlag,rpcDebugFlag - - flag_value can be: 131 (INFO/WARNING/ERROR), 135 (plus DEBUG), 143 (plus TRACE) - +Use `resetlog` to remove all logs generated on the local client. Use the other parameters to specify a log level for a specific component. -### 13. What to do if go compilation fails? +For each parameter, you can set the value to `131` (error and warning), `135` (error, warning, and debug), or `143` (error, warning, debug, and trace). -From version 2.3.0.0, a new component named `taosAdapter` is introduced. Its' developed in Go. If you want to compile from source code and meet go compilation problems, try to do below steps to resolve Go environment problems. +### Why do TDengine components written in Go fail to compile? -```sh -go env -w GO111MODULE=on -go env -w GOPROXY=https://goproxy.cn,direct -``` +TDengine includes taosAdapter, an independent component written in Go. This component provides the REST API as well as data access for other products such as Prometheus and Telegraf. +When using the develop branch, you must run `git submodule update --init --recursive` to download the taosAdapter repository and then compile it. + +TDengine Go components require Go version 1.14 or later. + +### 13. How can I query the storage space being used by my data? + +The TDengine data files are stored in `/var/lib/taos` by default. Log files are stored in `/var/log/taos`. + +To see how much space your data files occupy, run `du -sh /var/lib/taos/vnode --exclude='wal'`. This excludes the write-ahead log (WAL) because its size is relatively fixed while writes are occurring, and it is written to disk and cleared when you shut down TDengine. + +If you want to see how much space is occupied by a single database, first determine which vgroup is storing the database by running `show vgroups`. Then check `/var/lib/taos/vnode` for the files associated with the vgroup ID. + +### 15. How is timezone information processed for timestamps? + +TDengine uses the timezone of the client for timestamps. The server timezone does not affect timestamps. The client converts Unix timestamps in SQL statements to UTC before sending them to the server. When you query data on the server, it provides timestamps in UTC to the client, which converts them to its local time. + +Timestamps are processed as follows: + +1. The client uses its system timezone unless it has been configured otherwise. +2. A timezone configured in `taos.cfg` takes precedence over the system timezone. +3. A timezone explicitly specified when establishing a connection to TDengine through a connector takes precedence over `taos.cfg` and the system timezone. For example, the Java connector allows you to specify a timezone in the JDBC URL. +4. If you use an RFC 3339 timestamp (2013-04-12T15:52:01.123+08:00), or an ISO 8601 timestamp (2013-04-12T15:52:01.123+0800), the timezone specified in the timestamp is used instead of the timestamps configured using any other method. + +### 16. Which network ports are required by TDengine? + +See [serverPort](https://docs.tdengine.com/reference/config/#serverport) in Configuration Parameters. + +Note that ports are specified using 6030 as the default first port. If you change this port, all other ports change as well. + +### 17. Why do applications such as Grafana fail to connect to TDengine over the REST API? + +In TDengine, the REST API is provided by taosAdapter. Ensure that taosAdapter is running before you connect an application to TDengine over the REST API. You can run `systemctl start taosadapter` to start the service. + +Note that the log path for taosAdapter must be configured separately. The default path is `/var/log/taos`. You can choose one of eight log levels. The default is `info`. You can set the log level to `panic` to disable log output. You can modify the taosAdapter configuration file to change these settings. The default location is `/etc/taos/taosadapter.toml`. + +For more information, see [taosAdapter](https://docs.tdengine.com/reference/taosadapter/). + +### 18. How can I resolve out-of-memory (OOM) errors? + +OOM errors are thrown by the operating system when its memory, including swap, becomes insufficient and it needs to terminate processes to remain operational. Most OOM errors in TDengine occur for one of the following reasons: free memory is less than the value of `vm.min_free_kbytes` or free memory is less than the size of the request. If TDengine occupies reserved memory, an OOM error can occur even when free memory is sufficient. + +TDengine preallocates memory to each vnode. The number of vnodes per database is determined by the `vgroups` parameter, and the amount of memory per vnode is determined by the `buffer` parameter. To prevent OOM errors from occurring, ensure that you prepare sufficient memory on your hosts to support the number of vnodes that your deployment requires. Configure an appropriately sized swap space. If you continue to receive OOM errors, your SQL statements may be querying too much data for your system. TDengine Enterprise Edition includes optimized memory management that increases stability for enterprise customers. diff --git a/docs/en/27-train-faq/03-docker.md b/docs/en/27-train-faq/03-docker.md deleted file mode 100644 index 0378fffb8bdbc4cae8d4d2176ec3d745a548c2fe..0000000000000000000000000000000000000000 --- a/docs/en/27-train-faq/03-docker.md +++ /dev/null @@ -1,285 +0,0 @@ ---- -sidebar_label: TDengine in Docker -title: Deploy TDengine in Docker ---- - -We do not recommend deploying TDengine using Docker in a production system. However, Docker is still very useful in a development environment, especially when your host is not Linux. From version 2.0.14.0, the official image of TDengine can support X86-64, X86, arm64, and rm32 . - -In this chapter we introduce a simple step by step guide to use TDengine in Docker. - -## Install Docker - -To install Docker please refer to [Get Docker](https://docs.docker.com/get-docker/). - -After Docker is installed, you can check whether Docker is installed properly by displaying Docker version. - -```bash -$ docker -v -Docker version 20.10.3, build 48d30b5 -``` - -## Launch TDengine in Docker - -### Launch TDengine Server - -```bash -$ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine -526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd -``` - -In the above command, a docker container is started to run TDengine server, the port range 6030-6049 of the container is mapped to host port range 6030-6049. If port range 6030-6049 has been occupied on the host, please change to an available host port range. For port requirements on the host, please refer to [Port Configuration](/reference/config/#serverport). - -- **docker run**: Launch a docker container -- **-d**: the container will run in background mode -- **-p**: port mapping -- **tdengine/tdengine**: The image from which to launch the container -- **526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd**: the container ID if successfully launched. - -Furthermore, `--name` can be used with `docker run` to specify name for the container, `--hostname` can be used to specify hostname for the container, `-v` can be used to mount local volumes to the container so that the data generated inside the container can be persisted to disk on the host. - -```bash -docker run -d --name tdengine --hostname="tdengine-server" -v ~/work/taos/log:/var/log/taos -v ~/work/taos/data:/var/lib/taos -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine -``` - -- **--name tdengine**: specify the name of the container, the name can be used to specify the container later -- **--hostname=tdengine-server**: specify the hostname inside the container, the hostname can be used inside the container without worrying the container IP may vary -- **-v**: volume mapping between host and container - -### Check the container - -```bash -docker ps -``` - -The output is like below: - -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS ··· -c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ··· -``` - -- **docker ps**: List all the containers -- **CONTAINER ID**: Container ID -- **IMAGE**: The image used for the container -- **COMMAND**: The command used when launching the container -- **CREATED**: When the container was created -- **STATUS**: Status of the container - -### Access TDengine inside container - -```bash -$ docker exec -it tdengine /bin/bash -root@tdengine-server:~/TDengine-server-2.4.0.4# -``` - -- **docker exec**: Attach to the container -- **-i**: Interactive mode -- **-t**: Use terminal -- **tdengine**: Container name, up to the output of `docker ps` -- **/bin/bash**: The command to execute once the container is attached - -Inside the container, start TDengine CLI `taos` - -```bash -root@tdengine-server:~/TDengine-server-2.4.0.4# taos - -Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 -Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. - -taos> -``` - -The above example is for a successful connection. If `taos` fails to connect to the server side, error information would be shown. - -In TDengine CLI, SQL commands can be executed to create/drop databases, tables, STables, and insert or query data. For details please refer to [TAOS SQL](/taos-sql/). - -### Access TDengine from host - -If option `-p` used to map ports properly between host and container, it's also able to access TDengine in container from the host as long as `firstEp` is configured correctly for the client on host. - -``` -$ taos - -Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 -Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. - -taos> -``` - -It's also able to access the REST interface provided by TDengine in container from the host. - -``` -curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql -``` - -Output is like below: - -``` -{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2} -``` - -For details of REST API please refer to [REST API](/reference/rest-api/). - -### Run TDengine server and taosAdapter inside container - -From version 2.4.0.0, in the TDengine Docker image, `taosAdapter` is enabled by default, but can be disabled using environment variable `TAOS_DISABLE_ADAPTER=true` . `taosAdapter` can also be run alone without `taosd` when launching a container. - -For the port mapping of `taosAdapter`, please refer to [taosAdapter](/reference/taosadapter/). - -- Run both `taosd` and `taosAdapter` (by default) in docker container: - -```bash -docker run -d --name tdengine-all -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.4 -``` - -- Run `taosAdapter` only in docker container, `TAOS_FIRST_EP` environment variable needs to be used to specify the container name in which `taosd` is running: - -```bash -docker run -d --name tdengine-taosa -p 6041-6049:6041-6049 -p 6041-6049:6041-6049/udp -e TAOS_FIRST_EP=tdengine-all tdengine/tdengine:2.4.0.4 taosadapter -``` - -- Run `taosd` only in docker container: - -```bash -docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-6042/udp -e TAOS_DISABLE_ADAPTER=true tdengine/tdengine:2.4.0.4 -``` - -- Verify the REST interface: - -```bash -curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" 127.0.0.1:6041/rest/sql -``` - -Below is an example output: - -``` -{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2021-12-28 09:18:55.765",10,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1} -``` - -### Use taosBenchmark on host to access TDengine server in container - -1. Run `taosBenchmark`, named as `taosdemo` previously, on the host: - - ```bash - $ taosBenchmark - - taosBenchmark is simulating data generated by power equipments monitoring... - - host: 127.0.0.1:6030 - user: root - password: taosdata - configDir: - resultFile: ./output.txt - thread num of insert data: 10 - thread num of create table: 10 - top insert interval: 0 - number of records per req: 30000 - max sql length: 1048576 - database count: 1 - database[0]: - database[0] name: test - drop: yes - replica: 1 - precision: ms - super table count: 1 - super table[0]: - stbName: meters - autoCreateTable: no - childTblExists: no - childTblCount: 10000 - childTblPrefix: d - dataSource: rand - iface: taosc - insertRows: 10000 - interlaceRows: 0 - disorderRange: 1000 - disorderRatio: 0 - maxSqlLen: 1048576 - timeStampStep: 1 - startTimestamp: 2017-07-14 10:40:00.000 - sampleFormat: - sampleFile: - tagsFile: - columnCount: 3 - column[0]:FLOAT column[1]:INT column[2]:FLOAT - tagCount: 2 - tag[0]:INT tag[1]:BINARY(16) - - Press enter key to continue or Ctrl-C to stop - ``` - - Once the execution is finished, a database `test` is created, a STable `meters` is created in database `test`, 10,000 sub tables are created using `meters` as template, named as "d0" to "d9999", while 10,000 rows are inserted into each table, so totally 100,000,000 rows are inserted. - -2. Check the data - - - **Check database** - - ```bash - $ taos> show databases; - name | created_time | ntables | vgroups | ··· - test | 2021-08-18 06:01:11.021 | 10000 | 6 | ··· - log | 2021-08-18 05:51:51.065 | 4 | 1 | ··· - - ``` - - - **Check STable** - - ```bash - $ taos> use test; - Database changed. - - $ taos> show stables; - name | created_time | columns | tags | tables | - ============================================================================================ - meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 | - Query OK, 1 row(s) in set (0.003259s) - - ``` - - - **Check Tables** - - ```bash - $ taos> select * from test.t0 limit 10; - - DB error: Table does not exist (0.002857s) - taos> select * from test.d0 limit 10; - ts | current | voltage | phase | - ====================================================================================== - 2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 | - 2017-07-14 10:40:00.001 | 10.16103 | 224 | 0.34445 | - 2017-07-14 10:40:00.002 | 10.00204 | 220 | 0.33334 | - 2017-07-14 10:40:00.003 | 10.00030 | 220 | 0.33333 | - 2017-07-14 10:40:00.004 | 9.84029 | 216 | 0.32222 | - 2017-07-14 10:40:00.005 | 9.88028 | 217 | 0.32500 | - 2017-07-14 10:40:00.006 | 9.88110 | 217 | 0.32500 | - 2017-07-14 10:40:00.007 | 10.08137 | 222 | 0.33889 | - 2017-07-14 10:40:00.008 | 10.12063 | 223 | 0.34167 | - 2017-07-14 10:40:00.009 | 10.16086 | 224 | 0.34445 | - Query OK, 10 row(s) in set (0.016791s) - - ``` - - - **Check tag values of table d0** - - ```bash - $ taos> select groupid, location from test.d0; - groupid | location | - ================================= - 0 | California.SanDiego | - Query OK, 1 row(s) in set (0.003490s) - ``` - -### Access TDengine from 3rd party tools - -A lot of 3rd party tools can be used to write data into TDengine through `taosAdapter`, for details please refer to [3rd party tools](/third-party/). - -There is nothing different from the 3rd party side to access TDengine server inside a container, as long as the end point is specified correctly, the end point should be the FQDN and the mapped port of the host. - -## Stop TDengine inside container - -```bash -docker stop tdengine -``` - -- **docker stop**: stop a container -- **tdengine**: container name diff --git a/docs/en/28-releases.md b/docs/en/28-releases.md new file mode 100644 index 0000000000000000000000000000000000000000..a0c9eb119999571fb973b5e2243f237b8833b167 --- /dev/null +++ b/docs/en/28-releases.md @@ -0,0 +1,9 @@ +--- +sidebar_label: Releases +title: Released Versions +--- + +import Release from "/components/ReleaseV3"; + + + diff --git a/docs/examples/c/tmq_example.c b/docs/examples/c/tmq_example.c new file mode 100644 index 0000000000000000000000000000000000000000..19adaad116ef65673f5541b5216ce12d2d9151c7 --- /dev/null +++ b/docs/examples/c/tmq_example.c @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include "taos.h" + +static int running = 1; +static char dbName[64] = "tmqdb"; +static char stbName[64] = "stb"; +static char topicName[64] = "topicname"; + +static int32_t msg_process(TAOS_RES* msg) { + char buf[1024]; + int32_t rows = 0; + + const char* topicName = tmq_get_topic_name(msg); + const char* dbName = tmq_get_db_name(msg); + int32_t vgroupId = tmq_get_vgroup_id(msg); + + printf("topic: %s\n", topicName); + printf("db: %s\n", dbName); + printf("vgroup id: %d\n", vgroupId); + + while (1) { + TAOS_ROW row = taos_fetch_row(msg); + if (row == NULL) break; + + TAOS_FIELD* fields = taos_fetch_fields(msg); + int32_t numOfFields = taos_field_count(msg); + int32_t* length = taos_fetch_lengths(msg); + int32_t precision = taos_result_precision(msg); + rows++; + taos_print_row(buf, row, fields, numOfFields); + printf("row content: %s\n", buf); + } + + return rows; +} + +static int32_t init_env() { + TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); + if (pConn == NULL) { + return -1; + } + + TAOS_RES* pRes; + // drop database if exists + printf("create database\n"); + pRes = taos_query(pConn, "drop database if exists tmqdb"); + if (taos_errno(pRes) != 0) { + printf("error in drop tmqdb, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + // create database + pRes = taos_query(pConn, "create database tmqdb"); + if (taos_errno(pRes) != 0) { + printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + // create super table + printf("create super table\n"); + pRes = taos_query( + pConn, "create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))"); + if (taos_errno(pRes) != 0) { + printf("failed to create super table stb, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + // create sub tables + printf("create sub tables\n"); + pRes = taos_query(pConn, "create table tmqdb.ctb0 using tmqdb.stb tags(0, 'subtable0')"); + if (taos_errno(pRes) != 0) { + printf("failed to create super table ctb0, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "create table tmqdb.ctb1 using tmqdb.stb tags(1, 'subtable1')"); + if (taos_errno(pRes) != 0) { + printf("failed to create super table ctb1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "create table tmqdb.ctb2 using tmqdb.stb tags(2, 'subtable2')"); + if (taos_errno(pRes) != 0) { + printf("failed to create super table ctb2, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "create table tmqdb.ctb3 using tmqdb.stb tags(3, 'subtable3')"); + if (taos_errno(pRes) != 0) { + printf("failed to create super table ctb3, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + // insert data + printf("insert data into sub tables\n"); + pRes = taos_query(pConn, "insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00')"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11')"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into tmqdb.ctb2 values(now, 2, 2, 'a1')(now+1s, 22, 22, 'a22')"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into tmqdb.ctb3 values(now, 3, 3, 'a1')(now+1s, 33, 33, 'a33')"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + taos_close(pConn); + return 0; +} + +int32_t create_topic() { + printf("create topic\n"); + TAOS_RES* pRes; + TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); + if (pConn == NULL) { + return -1; + } + + pRes = taos_query(pConn, "use tmqdb"); + if (taos_errno(pRes) != 0) { + printf("error in use tmqdb, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3, tbname from tmqdb.stb where c1 > 1"); + if (taos_errno(pRes) != 0) { + printf("failed to create topic topicname, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + taos_close(pConn); + return 0; +} + +void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { + printf("tmq_commit_cb_print() code: %d, tmq: %p, param: %p\n", code, tmq, param); +} + +tmq_t* build_consumer() { + tmq_conf_res_t code; + tmq_conf_t* conf = tmq_conf_new(); + code = tmq_conf_set(conf, "enable.auto.commit", "true"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "auto.commit.interval.ms", "1000"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "group.id", "cgrpName"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "client.id", "user defined name"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "td.connect.user", "root"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "td.connect.pass", "taosdata"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "auto.offset.reset", "earliest"); + if (TMQ_CONF_OK != code) return NULL; + code = tmq_conf_set(conf, "experimental.snapshot.enable", "false"); + if (TMQ_CONF_OK != code) return NULL; + + tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); + + tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); + tmq_conf_destroy(conf); + return tmq; +} + +tmq_list_t* build_topic_list() { + tmq_list_t* topicList = tmq_list_new(); + int32_t code = tmq_list_append(topicList, "topicname"); + if (code) { + return NULL; + } + return topicList; +} + +void basic_consume_loop(tmq_t* tmq) { + int32_t totalRows = 0; + int32_t msgCnt = 0; + int32_t timeout = 5000; + while (running) { + TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeout); + if (tmqmsg) { + msgCnt++; + totalRows += msg_process(tmqmsg); + taos_free_result(tmqmsg); + } else { + break; + } + } + + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); +} + +int main(int argc, char* argv[]) { + int32_t code; + + if (init_env() < 0) { + return -1; + } + + if (create_topic() < 0) { + return -1; + } + + tmq_t* tmq = build_consumer(); + if (NULL == tmq) { + fprintf(stderr, "%% build_consumer() fail!\n"); + return -1; + } + + tmq_list_t* topic_list = build_topic_list(); + if (NULL == topic_list) { + return -1; + } + + if ((code = tmq_subscribe(tmq, topic_list))) { + fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code)); + } + tmq_list_destroy(topic_list); + + basic_consume_loop(tmq); + + code = tmq_consumer_close(tmq); + if (code) { + fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code)); + } else { + fprintf(stderr, "%% Consumer closed\n"); + } + + return 0; +} diff --git a/docs/examples/java/pom.xml b/docs/examples/java/pom.xml index a48ba398da92f401235819d067aa2ba6f8b173ea..634c3f75a8386db4caab5c1d598f89dc93926c54 100644 --- a/docs/examples/java/pom.xml +++ b/docs/examples/java/pom.xml @@ -21,7 +21,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.38 + 3.0.0 diff --git a/docs/examples/java/src/main/java/com/taos/example/Meters.java b/docs/examples/java/src/main/java/com/taos/example/Meters.java new file mode 100644 index 0000000000000000000000000000000000000000..0f1eadd55b5b197c8e9ecbc415e78fc28e2cb61b --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/Meters.java @@ -0,0 +1,62 @@ +package com.taos.example; + +import java.sql.Timestamp; + +public class Meters { + private Timestamp ts; + private float current; + private int voltage; + private int groupid; + private String location; + + public Timestamp getTs() { + return ts; + } + + public void setTs(Timestamp ts) { + this.ts = ts; + } + + public float getCurrent() { + return current; + } + + public void setCurrent(float current) { + this.current = current; + } + + public int getVoltage() { + return voltage; + } + + public void setVoltage(int voltage) { + this.voltage = voltage; + } + + public int getGroupid() { + return groupid; + } + + public void setGroupid(int groupid) { + this.groupid = groupid; + } + + public String getLocation() { + return location; + } + + public void setLocation(String location) { + this.location = location; + } + + @Override + public String toString() { + return "Meters{" + + "ts=" + ts + + ", current=" + current + + ", voltage=" + voltage + + ", groupid=" + groupid + + ", location='" + location + '\'' + + '}'; + } +} diff --git a/docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java b/docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java new file mode 100644 index 0000000000000000000000000000000000000000..9b7fa35e90fb4f66d2885af0b2a2f1a429a97127 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java @@ -0,0 +1,6 @@ +package com.taos.example; + +import com.taosdata.jdbc.tmq.ReferenceDeserializer; + +public class MetersDeserializer extends ReferenceDeserializer { +} \ No newline at end of file diff --git a/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java b/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java index af97fe4373ca964260e5614f133f359e229b0e15..9d85bf2a94abda71bcdab89d46008b70e52ce437 100644 --- a/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java +++ b/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java @@ -16,14 +16,14 @@ public class RestInsertExample { private static List getRawData() { return Arrays.asList( - "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2", - "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2", - "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2", - "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3", - "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2", - "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2", - "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3", - "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3" + "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,'California.SanFrancisco',2", + "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,'California.SanFrancisco',2", + "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,'California.SanFrancisco',2", + "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,'California.SanFrancisco',3", + "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,'California.LosAngeles',2", + "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,'California.LosAngeles',2", + "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,'California.LosAngeles',3", + "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,'California.LosAngeles',3" ); } diff --git a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java index d82d03b9de561e3ea6a8e7d40a48ce9dd3f2a20d..179e6e6911185631901b79e34a343967e73c4936 100644 --- a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java @@ -1,65 +1,78 @@ package com.taos.example; -import com.taosdata.jdbc.TSDBConnection; -import com.taosdata.jdbc.TSDBDriver; -import com.taosdata.jdbc.TSDBResultSet; -import com.taosdata.jdbc.TSDBSubscribe; +import com.taosdata.jdbc.tmq.ConsumerRecords; +import com.taosdata.jdbc.tmq.TMQConstants; +import com.taosdata.jdbc.tmq.TaosConsumer; import java.sql.Connection; import java.sql.DriverManager; -import java.sql.ResultSetMetaData; import java.sql.SQLException; +import java.sql.Statement; +import java.time.Duration; +import java.util.Collections; import java.util.Properties; -import java.util.concurrent.TimeUnit; +import java.util.Timer; +import java.util.TimerTask; +import java.util.concurrent.atomic.AtomicBoolean; public class SubscribeDemo { - private static final String topic = "topic-meter-current-bg-10"; - private static final String sql = "select * from meters where current > 10"; + private static final String TOPIC = "tmq_topic"; + private static final String DB_NAME = "meters"; + private static final AtomicBoolean shutdown = new AtomicBoolean(false); public static void main(String[] args) { - Connection connection = null; - TSDBSubscribe subscribe = null; - + Timer timer = new Timer(); + timer.schedule(new TimerTask() { + public void run() { + shutdown.set(true); + } + }, 3_000); try { + // prepare Class.forName("com.taosdata.jdbc.TSDBDriver"); + String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata"; + Connection connection = DriverManager.getConnection(jdbcUrl); + try (Statement statement = connection.createStatement()) { + statement.executeUpdate("drop topic if exists " + TOPIC); + statement.executeUpdate("drop database if exists " + DB_NAME); + statement.executeUpdate("create database " + DB_NAME); + statement.executeUpdate("use " + DB_NAME); + statement.executeUpdate( + "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(16))"); + statement.executeUpdate("CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')"); + statement.executeUpdate("INSERT INTO `d0` values(now - 10s, 0.32, 116)"); + statement.executeUpdate("INSERT INTO `d0` values(now - 8s, NULL, NULL)"); + statement.executeUpdate( + "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119)"); + statement.executeUpdate( + "INSERT INTO `d1` values (now-8s, 10, 120) (now - 6s, 10, 119) (now - 4s, 11.2, 118)"); + // create topic + statement.executeUpdate("create topic " + TOPIC + " as select * from meters"); + } + + // create consumer Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/power?user=root&password=taosdata"; - connection = DriverManager.getConnection(jdbcUrl, properties); - // create subscribe - subscribe = ((TSDBConnection) connection).subscribe(topic, sql, true); - int count = 0; - while (count < 10) { - // wait 1 second to avoid frequent calls to consume - TimeUnit.SECONDS.sleep(1); - // consume - TSDBResultSet resultSet = subscribe.consume(); - if (resultSet == null) { - continue; - } - ResultSetMetaData metaData = resultSet.getMetaData(); - while (resultSet.next()) { - int columnCount = metaData.getColumnCount(); - for (int i = 1; i <= columnCount; i++) { - System.out.print(metaData.getColumnLabel(i) + ": " + resultSet.getString(i) + "\t"); + properties.setProperty(TMQConstants.BOOTSTRAP_SERVERS, "127.0.0.1:6030"); + properties.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true"); + properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true"); + properties.setProperty(TMQConstants.GROUP_ID, "test"); + properties.setProperty(TMQConstants.VALUE_DESERIALIZER, + "com.taos.example.MetersDeserializer"); + + // poll data + try (TaosConsumer consumer = new TaosConsumer<>(properties)) { + consumer.subscribe(Collections.singletonList(TOPIC)); + while (!shutdown.get()) { + ConsumerRecords meters = consumer.poll(Duration.ofMillis(100)); + for (Meters meter : meters) { + System.out.println(meter); } - System.out.println(); - count++; } + consumer.unsubscribe(); } - } catch (Exception e) { + } catch (ClassNotFoundException | SQLException e) { e.printStackTrace(); - } finally { - try { - if (null != subscribe) - // close subscribe - subscribe.close(true); - if (connection != null) - connection.close(); - } catch (SQLException throwable) { - throwable.printStackTrace(); - } } + timer.cancel(); } } \ No newline at end of file diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java new file mode 100644 index 0000000000000000000000000000000000000000..04b149a4b96441ecfd1b0bdde54c9ed71349cab2 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java @@ -0,0 +1,63 @@ +package com.taos.example.highvolume; + +import java.sql.*; + +/** + * Prepare target database. + * Count total records in database periodically so that we can estimate the writing speed. + */ +public class DataBaseMonitor { + private Connection conn; + private Statement stmt; + + public DataBaseMonitor init() throws SQLException { + if (conn == null) { + String jdbcURL = System.getenv("TDENGINE_JDBC_URL"); + conn = DriverManager.getConnection(jdbcURL); + stmt = conn.createStatement(); + } + return this; + } + + public void close() { + try { + stmt.close(); + } catch (SQLException e) { + } + try { + conn.close(); + } catch (SQLException e) { + } + } + + public void prepareDatabase() throws SQLException { + stmt.execute("DROP DATABASE IF EXISTS test"); + stmt.execute("CREATE DATABASE test"); + stmt.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"); + } + + public Long count() throws SQLException { + if (!stmt.isClosed()) { + ResultSet result = stmt.executeQuery("SELECT count(*) from test.meters"); + result.next(); + return result.getLong(1); + } + return null; + } + + /** + * show test.stables; + * + * name | created_time | columns | tags | tables | + * ============================================================================================ + * meters | 2022-07-20 08:39:30.902 | 4 | 2 | 620000 | + */ + public Long getTableCount() throws SQLException { + if (!stmt.isClosed()) { + ResultSet result = stmt.executeQuery("show test.stables"); + result.next(); + return result.getLong(5); + } + return null; + } +} \ No newline at end of file diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java new file mode 100644 index 0000000000000000000000000000000000000000..41b59551ca69a4056c2f2b572d169bd08dc4fcfe --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java @@ -0,0 +1,70 @@ +package com.taos.example.highvolume; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.*; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; + + +public class FastWriteExample { + final static Logger logger = LoggerFactory.getLogger(FastWriteExample.class); + + final static int taskQueueCapacity = 1000000; + final static List> taskQueues = new ArrayList<>(); + final static List readTasks = new ArrayList<>(); + final static List writeTasks = new ArrayList<>(); + final static DataBaseMonitor databaseMonitor = new DataBaseMonitor(); + + public static void stopAll() { + logger.info("shutting down"); + readTasks.forEach(task -> task.stop()); + writeTasks.forEach(task -> task.stop()); + databaseMonitor.close(); + } + + public static void main(String[] args) throws InterruptedException, SQLException { + int readTaskCount = args.length > 0 ? Integer.parseInt(args[0]) : 1; + int writeTaskCount = args.length > 1 ? Integer.parseInt(args[1]) : 3; + int tableCount = args.length > 2 ? Integer.parseInt(args[2]) : 1000; + int maxBatchSize = args.length > 3 ? Integer.parseInt(args[3]) : 3000; + + logger.info("readTaskCount={}, writeTaskCount={} tableCount={} maxBatchSize={}", + readTaskCount, writeTaskCount, tableCount, maxBatchSize); + + databaseMonitor.init().prepareDatabase(); + + // Create task queues, whiting tasks and start writing threads. + for (int i = 0; i < writeTaskCount; ++i) { + BlockingQueue queue = new ArrayBlockingQueue<>(taskQueueCapacity); + taskQueues.add(queue); + WriteTask task = new WriteTask(queue, maxBatchSize); + Thread t = new Thread(task); + t.setName("WriteThread-" + i); + t.start(); + } + + // create reading tasks and start reading threads + int tableCountPerTask = tableCount / readTaskCount; + for (int i = 0; i < readTaskCount; ++i) { + ReadTask task = new ReadTask(i, taskQueues, tableCountPerTask); + Thread t = new Thread(task); + t.setName("ReadThread-" + i); + t.start(); + } + + Runtime.getRuntime().addShutdownHook(new Thread(FastWriteExample::stopAll)); + + long lastCount = 0; + while (true) { + Thread.sleep(10000); + long numberOfTable = databaseMonitor.getTableCount(); + long count = databaseMonitor.count(); + logger.info("numberOfTable={} count={} speed={}", numberOfTable, count, (count - lastCount) / 10); + lastCount = count; + } + } +} \ No newline at end of file diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java new file mode 100644 index 0000000000000000000000000000000000000000..6fe83f002ebcb9d82e026e9a32886fd22bfefbe9 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java @@ -0,0 +1,53 @@ +package com.taos.example.highvolume; + +import java.util.Iterator; + +/** + * Generate test data + */ +class MockDataSource implements Iterator { + private String tbNamePrefix; + private int tableCount; + private long maxRowsPerTable = 1000000000L; + + // 100 milliseconds between two neighbouring rows. + long startMs = System.currentTimeMillis() - maxRowsPerTable * 100; + private int currentRow = 0; + private int currentTbId = -1; + + // mock values + String[] location = {"LosAngeles", "SanDiego", "Hollywood", "Compton", "San Francisco"}; + float[] current = {8.8f, 10.7f, 9.9f, 8.9f, 9.4f}; + int[] voltage = {119, 116, 111, 113, 118}; + float[] phase = {0.32f, 0.34f, 0.33f, 0.329f, 0.141f}; + + public MockDataSource(String tbNamePrefix, int tableCount) { + this.tbNamePrefix = tbNamePrefix; + this.tableCount = tableCount; + } + + @Override + public boolean hasNext() { + currentTbId += 1; + if (currentTbId == tableCount) { + currentTbId = 0; + currentRow += 1; + } + return currentRow < maxRowsPerTable; + } + + @Override + public String next() { + long ts = startMs + 100 * currentRow; + int groupId = currentTbId % 5 == 0 ? currentTbId / 5 : currentTbId / 5 + 1; + StringBuilder sb = new StringBuilder(tbNamePrefix + "_" + currentTbId + ","); // tbName + sb.append(ts).append(','); // ts + sb.append(current[currentRow % 5]).append(','); // current + sb.append(voltage[currentRow % 5]).append(','); // voltage + sb.append(phase[currentRow % 5]).append(','); // phase + sb.append(location[currentRow % 5]).append(','); // location + sb.append(groupId); // groupID + + return sb.toString(); + } +} \ No newline at end of file diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java new file mode 100644 index 0000000000000000000000000000000000000000..a6fcfed1d28281d46aff493ef9783972858ebe62 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java @@ -0,0 +1,58 @@ +package com.taos.example.highvolume; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.BlockingQueue; + +class ReadTask implements Runnable { + private final static Logger logger = LoggerFactory.getLogger(ReadTask.class); + private final int taskId; + private final List> taskQueues; + private final int queueCount; + private final int tableCount; + private boolean active = true; + + public ReadTask(int readTaskId, List> queues, int tableCount) { + this.taskId = readTaskId; + this.taskQueues = queues; + this.queueCount = queues.size(); + this.tableCount = tableCount; + } + + /** + * Assign data received to different queues. + * Here we use the suffix number in table name. + * You are expected to define your own rule in practice. + * + * @param line record received + * @return which queue to use + */ + public int getQueueId(String line) { + String tbName = line.substring(0, line.indexOf(',')); // For example: tb1_101 + String suffixNumber = tbName.split("_")[1]; + return Integer.parseInt(suffixNumber) % this.queueCount; + } + + @Override + public void run() { + logger.info("started"); + Iterator it = new MockDataSource("tb" + this.taskId, tableCount); + try { + while (it.hasNext() && active) { + String line = it.next(); + int queueId = getQueueId(line); + taskQueues.get(queueId).put(line); + } + } catch (Exception e) { + logger.error("Read Task Error", e); + } + } + + public void stop() { + logger.info("stop"); + this.active = false; + } +} \ No newline at end of file diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java new file mode 100644 index 0000000000000000000000000000000000000000..c2989acdbe3d0f56d7451ac86051a55955ce14de --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java @@ -0,0 +1,205 @@ +package com.taos.example.highvolume; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.*; +import java.util.HashMap; +import java.util.Map; + +/** + * A helper class encapsulate the logic of writing using SQL. + *

    + * The main interfaces are two methods: + *

      + *
    1. {@link SQLWriter#processLine}, which receive raw lines from WriteTask and group them by table names.
    2. + *
    3. {@link SQLWriter#flush}, which assemble INSERT statement and execute it.
    4. + *
    + *

    + * There is a technical skill worth mentioning: we create table as needed when "table does not exist" error occur instead of creating table automatically using syntax "INSET INTO tb USING stb". + * This ensure that checking table existence is a one-time-only operation. + *

    + * + *

    + */ +public class SQLWriter { + final static Logger logger = LoggerFactory.getLogger(SQLWriter.class); + + private Connection conn; + private Statement stmt; + + /** + * current number of buffered records + */ + private int bufferedCount = 0; + /** + * Maximum number of buffered records. + * Flush action will be triggered if bufferedCount reached this value, + */ + private int maxBatchSize; + + + /** + * Maximum SQL length. + */ + private int maxSQLLength; + + /** + * Map from table name to column values. For example: + * "tb001" -> "(1648432611249,2.1,114,0.09) (1648432611250,2.2,135,0.2)" + */ + private Map tbValues = new HashMap<>(); + + /** + * Map from table name to tag values in the same order as creating stable. + * Used for creating table. + */ + private Map tbTags = new HashMap<>(); + + public SQLWriter(int maxBatchSize) { + this.maxBatchSize = maxBatchSize; + } + + + /** + * Get Database Connection + * + * @return Connection + * @throws SQLException + */ + private static Connection getConnection() throws SQLException { + String jdbcURL = System.getenv("TDENGINE_JDBC_URL"); + return DriverManager.getConnection(jdbcURL); + } + + /** + * Create Connection and Statement + * + * @throws SQLException + */ + public void init() throws SQLException { + conn = getConnection(); + stmt = conn.createStatement(); + stmt.execute("use test"); + ResultSet rs = stmt.executeQuery("show variables"); + while (rs.next()) { + String configName = rs.getString(1); + if ("maxSQLLength".equals(configName)) { + maxSQLLength = Integer.parseInt(rs.getString(2)); + logger.info("maxSQLLength={}", maxSQLLength); + } + } + } + + /** + * Convert raw data to SQL fragments, group them by table name and cache them in a HashMap. + * Trigger writing when number of buffered records reached maxBachSize. + * + * @param line raw data get from task queue in format: tbName,ts,current,voltage,phase,location,groupId + */ + public void processLine(String line) throws SQLException { + bufferedCount += 1; + int firstComma = line.indexOf(','); + String tbName = line.substring(0, firstComma); + int lastComma = line.lastIndexOf(','); + int secondLastComma = line.lastIndexOf(',', lastComma - 1); + String value = "(" + line.substring(firstComma + 1, secondLastComma) + ") "; + if (tbValues.containsKey(tbName)) { + tbValues.put(tbName, tbValues.get(tbName) + value); + } else { + tbValues.put(tbName, value); + } + if (!tbTags.containsKey(tbName)) { + String location = line.substring(secondLastComma + 1, lastComma); + String groupId = line.substring(lastComma + 1); + String tagValues = "('" + location + "'," + groupId + ')'; + tbTags.put(tbName, tagValues); + } + if (bufferedCount == maxBatchSize) { + flush(); + } + } + + + /** + * Assemble INSERT statement using buffered SQL fragments in Map {@link SQLWriter#tbValues} and execute it. + * In case of "Table does not exit" exception, create all tables in the sql and retry the sql. + */ + public void flush() throws SQLException { + StringBuilder sb = new StringBuilder("INSERT INTO "); + for (Map.Entry entry : tbValues.entrySet()) { + String tableName = entry.getKey(); + String values = entry.getValue(); + String q = tableName + " values " + values + " "; + if (sb.length() + q.length() > maxSQLLength) { + executeSQL(sb.toString()); + logger.warn("increase maxSQLLength or decrease maxBatchSize to gain better performance"); + sb = new StringBuilder("INSERT INTO "); + } + sb.append(q); + } + executeSQL(sb.toString()); + tbValues.clear(); + bufferedCount = 0; + } + + private void executeSQL(String sql) throws SQLException { + try { + stmt.executeUpdate(sql); + } catch (SQLException e) { + // convert to error code defined in taoserror.h + int errorCode = e.getErrorCode() & 0xffff; + if (errorCode == 0x362 || errorCode == 0x218) { + // Table does not exist + createTables(); + executeSQL(sql); + } else { + logger.error("Execute SQL: {}", sql); + throw e; + } + } catch (Throwable throwable) { + logger.error("Execute SQL: {}", sql); + throw throwable; + } + } + + /** + * Create tables in batch using syntax: + *

    + * CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...; + *

    + */ + private void createTables() throws SQLException { + StringBuilder sb = new StringBuilder("CREATE TABLE "); + for (String tbName : tbValues.keySet()) { + String tagValues = tbTags.get(tbName); + sb.append("IF NOT EXISTS ").append(tbName).append(" USING meters TAGS ").append(tagValues).append(" "); + } + String sql = sb.toString(); + try { + stmt.executeUpdate(sql); + } catch (Throwable throwable) { + logger.error("Execute SQL: {}", sql); + throw throwable; + } + } + + public boolean hasBufferedValues() { + return bufferedCount > 0; + } + + public int getBufferedCount() { + return bufferedCount; + } + + public void close() { + try { + stmt.close(); + } catch (SQLException e) { + } + try { + conn.close(); + } catch (SQLException e) { + } + } +} \ No newline at end of file diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java new file mode 100644 index 0000000000000000000000000000000000000000..8ade06625d708a112c85d5657aa00bcd0e605ff4 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java @@ -0,0 +1,4 @@ +package com.taos.example.highvolume; + +public class StmtWriter { +} diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java new file mode 100644 index 0000000000000000000000000000000000000000..de9e5463d7dc59478f991e4783aacaae527b4c4b --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java @@ -0,0 +1,58 @@ +package com.taos.example.highvolume; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.BlockingQueue; + +class WriteTask implements Runnable { + private final static Logger logger = LoggerFactory.getLogger(WriteTask.class); + private final int maxBatchSize; + + // the queue from which this writing task get raw data. + private final BlockingQueue queue; + + // A flag indicate whether to continue. + private boolean active = true; + + public WriteTask(BlockingQueue taskQueue, int maxBatchSize) { + this.queue = taskQueue; + this.maxBatchSize = maxBatchSize; + } + + @Override + public void run() { + logger.info("started"); + String line = null; // data getting from the queue just now. + SQLWriter writer = new SQLWriter(maxBatchSize); + try { + writer.init(); + while (active) { + line = queue.poll(); + if (line != null) { + // parse raw data and buffer the data. + writer.processLine(line); + } else if (writer.hasBufferedValues()) { + // write data immediately if no more data in the queue + writer.flush(); + } else { + // sleep a while to avoid high CPU usage if no more data in the queue and no buffered records, . + Thread.sleep(100); + } + } + if (writer.hasBufferedValues()) { + writer.flush(); + } + } catch (Exception e) { + String msg = String.format("line=%s, bufferedCount=%s", line, writer.getBufferedCount()); + logger.error(msg, e); + } finally { + writer.close(); + } + } + + public void stop() { + logger.info("stop"); + this.active = false; + } +} \ No newline at end of file diff --git a/docs/examples/java/src/test/java/com/taos/test/TestAll.java b/docs/examples/java/src/test/java/com/taos/test/TestAll.java index 42db24485afec05298159f7b0c3a4e15835d98ed..8d201da0745e1d2d36220c9d78383fc37d4a813a 100644 --- a/docs/examples/java/src/test/java/com/taos/test/TestAll.java +++ b/docs/examples/java/src/test/java/com/taos/test/TestAll.java @@ -23,16 +23,16 @@ public class TestAll { String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; try (Connection conn = DriverManager.getConnection(jdbcUrl)) { try (Statement stmt = conn.createStatement()) { - String sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" + - " power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" + - " power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" + - " power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" + - " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" + - " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" + - " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" + - " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" + - " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" + - " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)"; + String sql = "INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" + + " power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" + + " power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" + + " power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" + + " power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" + + " power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" + + " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" + + " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" + + " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" + + " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)"; stmt.execute(sql); } diff --git a/docs/examples/node/nativeexample/subscribe_demo.js b/docs/examples/node/nativeexample/subscribe_demo.js index c4f7e6df84933f8f8541814cabd231fcf5c2db68..53cbe55d264a0e83b4d4b441b0b912872bbb7018 100644 --- a/docs/examples/node/nativeexample/subscribe_demo.js +++ b/docs/examples/node/nativeexample/subscribe_demo.js @@ -48,4 +48,4 @@ try { cursor.close(); conn.close(); }, 2000); -} \ No newline at end of file +} diff --git a/docs/examples/node/package.json b/docs/examples/node/package.json index 36d3f016b5262472d5c63a2c98cc9704e57a59fe..d00d71d99fdff89af68f31a50416681733a08274 100644 --- a/docs/examples/node/package.json +++ b/docs/examples/node/package.json @@ -4,7 +4,7 @@ "main": "index.js", "license": "MIT", "dependencies": { - "@tdengine/client": "^3.0.0", + "@tdengine/client": "^3.0.1", "@tdengine/rest": "^3.0.0" } } diff --git a/docs/examples/python/connect_native_reference.py b/docs/examples/python/connect_native_reference.py index c17e9795b58724f6646b8d7c0f84047098a93d69..09b0685acef8c68b30153da5a1761d36b1cf9aae 100644 --- a/docs/examples/python/connect_native_reference.py +++ b/docs/examples/python/connect_native_reference.py @@ -11,10 +11,10 @@ conn: taos.TaosConnection = taos.connect(host="localhost", server_version = conn.server_info print("server_version", server_version) client_version = conn.client_info -print("client_version", client_version) # 2.4.0.16 +print("client_version", client_version) # 3.0.0.0 conn.close() # possible output: -# 2.4.0.16 -# 2.4.0.16 +# 3.0.0.0 +# 3.0.0.0 diff --git a/docs/examples/python/fast_write_example.py b/docs/examples/python/fast_write_example.py new file mode 100644 index 0000000000000000000000000000000000000000..c9d606388fdecd85f1468f24cc497ecc5941f035 --- /dev/null +++ b/docs/examples/python/fast_write_example.py @@ -0,0 +1,180 @@ +# install dependencies: +# recommend python >= 3.8 +# pip3 install faster-fifo +# + +import logging +import math +import sys +import time +import os +from multiprocessing import Process +from faster_fifo import Queue +from mockdatasource import MockDataSource +from queue import Empty +from typing import List + +logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(asctime)s [%(name)s] - %(message)s") + +READ_TASK_COUNT = 1 +WRITE_TASK_COUNT = 1 +TABLE_COUNT = 1000 +QUEUE_SIZE = 1000000 +MAX_BATCH_SIZE = 3000 + +read_processes = [] +write_processes = [] + + +def get_connection(): + """ + If variable TDENGINE_FIRST_EP is provided then it will be used. If not, firstEP in /etc/taos/taos.cfg will be used. + You can also override the default username and password by supply variable TDENGINE_USER and TDENGINE_PASSWORD + """ + import taos + firstEP = os.environ.get("TDENGINE_FIRST_EP") + if firstEP: + host, port = firstEP.split(":") + else: + host, port = None, 0 + user = os.environ.get("TDENGINE_USER", "root") + password = os.environ.get("TDENGINE_PASSWORD", "taosdata") + return taos.connect(host=host, port=int(port), user=user, password=password) + + +# ANCHOR: read + +def run_read_task(task_id: int, task_queues: List[Queue]): + table_count_per_task = TABLE_COUNT // READ_TASK_COUNT + data_source = MockDataSource(f"tb{task_id}", table_count_per_task) + try: + for batch in data_source: + for table_id, rows in batch: + # hash data to different queue + i = table_id % len(task_queues) + # block putting forever when the queue is full + task_queues[i].put_many(rows, block=True, timeout=-1) + except KeyboardInterrupt: + pass + + +# ANCHOR_END: read + +# ANCHOR: write +def run_write_task(task_id: int, queue: Queue): + from sql_writer import SQLWriter + log = logging.getLogger(f"WriteTask-{task_id}") + writer = SQLWriter(get_connection) + lines = None + try: + while True: + try: + # get as many as possible + lines = queue.get_many(block=False, max_messages_to_get=MAX_BATCH_SIZE) + writer.process_lines(lines) + except Empty: + time.sleep(0.01) + except KeyboardInterrupt: + pass + except BaseException as e: + log.debug(f"lines={lines}") + raise e + + +# ANCHOR_END: write + +def set_global_config(): + argc = len(sys.argv) + if argc > 1: + global READ_TASK_COUNT + READ_TASK_COUNT = int(sys.argv[1]) + if argc > 2: + global WRITE_TASK_COUNT + WRITE_TASK_COUNT = int(sys.argv[2]) + if argc > 3: + global TABLE_COUNT + TABLE_COUNT = int(sys.argv[3]) + if argc > 4: + global QUEUE_SIZE + QUEUE_SIZE = int(sys.argv[4]) + if argc > 5: + global MAX_BATCH_SIZE + MAX_BATCH_SIZE = int(sys.argv[5]) + + +# ANCHOR: monitor +def run_monitor_process(): + log = logging.getLogger("DataBaseMonitor") + conn = get_connection() + conn.execute("DROP DATABASE IF EXISTS test") + conn.execute("CREATE DATABASE test") + conn.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) " + "TAGS (location BINARY(64), groupId INT)") + + def get_count(): + res = conn.query("SELECT count(*) FROM test.meters") + rows = res.fetch_all() + return rows[0][0] if rows else 0 + + last_count = 0 + while True: + time.sleep(10) + count = get_count() + log.info(f"count={count} speed={(count - last_count) / 10}") + last_count = count + + +# ANCHOR_END: monitor +# ANCHOR: main +def main(): + set_global_config() + logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, " + f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}") + + monitor_process = Process(target=run_monitor_process) + monitor_process.start() + time.sleep(3) # waiting for database ready. + + task_queues: List[Queue] = [] + # create task queues + for i in range(WRITE_TASK_COUNT): + queue = Queue(max_size_bytes=QUEUE_SIZE) + task_queues.append(queue) + + # create write processes + for i in range(WRITE_TASK_COUNT): + p = Process(target=run_write_task, args=(i, task_queues[i])) + p.start() + logging.debug(f"WriteTask-{i} started with pid {p.pid}") + write_processes.append(p) + + # create read processes + for i in range(READ_TASK_COUNT): + queues = assign_queues(i, task_queues) + p = Process(target=run_read_task, args=(i, queues)) + p.start() + logging.debug(f"ReadTask-{i} started with pid {p.pid}") + read_processes.append(p) + + try: + monitor_process.join() + except KeyboardInterrupt: + monitor_process.terminate() + [p.terminate() for p in read_processes] + [p.terminate() for p in write_processes] + [q.close() for q in task_queues] + + +def assign_queues(read_task_id, task_queues): + """ + Compute target queues for a specific read task. + """ + ratio = WRITE_TASK_COUNT / READ_TASK_COUNT + from_index = math.floor(read_task_id * ratio) + end_index = math.ceil((read_task_id + 1) * ratio) + return task_queues[from_index:end_index] + + +if __name__ == '__main__': + main() +# ANCHOR_END: main diff --git a/docs/examples/python/mockdatasource.py b/docs/examples/python/mockdatasource.py new file mode 100644 index 0000000000000000000000000000000000000000..852860aec0adc8f9b043c9dcd5deb0bf00239201 --- /dev/null +++ b/docs/examples/python/mockdatasource.py @@ -0,0 +1,49 @@ +import time + + +class MockDataSource: + samples = [ + "8.8,119,0.32,LosAngeles,0", + "10.7,116,0.34,SanDiego,1", + "9.9,111,0.33,Hollywood,2", + "8.9,113,0.329,Compton,3", + "9.4,118,0.141,San Francisco,4" + ] + + def __init__(self, tb_name_prefix, table_count): + self.table_name_prefix = tb_name_prefix + "_" + self.table_count = table_count + self.max_rows = 10000000 + self.current_ts = round(time.time() * 1000) - self.max_rows * 100 + # [(tableId, tableName, values),] + self.data = self._init_data() + + def _init_data(self): + lines = self.samples * (self.table_count // 5 + 1) + data = [] + for i in range(self.table_count): + table_name = self.table_name_prefix + str(i) + data.append((i, table_name, lines[i])) # tableId, row + return data + + def __iter__(self): + self.row = 0 + return self + + def __next__(self): + """ + next 1000 rows for each table. + return: {tableId:[row,...]} + """ + # generate 1000 timestamps + ts = [] + for _ in range(1000): + self.current_ts += 100 + ts.append(str(self.current_ts)) + # add timestamp to each row + # [(tableId, ["tableName,ts,current,voltage,phase,location,groupId"])] + result = [] + for table_id, table_name, values in self.data: + rows = [table_name + ',' + t + ',' + values for t in ts] + result.append((table_id, rows)) + return result diff --git a/docs/examples/python/sql_writer.py b/docs/examples/python/sql_writer.py new file mode 100644 index 0000000000000000000000000000000000000000..758167376b009f21afc701be7d89c1bfbabdeb9f --- /dev/null +++ b/docs/examples/python/sql_writer.py @@ -0,0 +1,90 @@ +import logging +import taos + + +class SQLWriter: + log = logging.getLogger("SQLWriter") + + def __init__(self, get_connection_func): + self._tb_values = {} + self._tb_tags = {} + self._conn = get_connection_func() + self._max_sql_length = self.get_max_sql_length() + self._conn.execute("USE test") + + def get_max_sql_length(self): + rows = self._conn.query("SHOW variables").fetch_all() + for r in rows: + name = r[0] + if name == "maxSQLLength": + return int(r[1]) + return 1024 * 1024 + + def process_lines(self, lines: str): + """ + :param lines: [[tbName,ts,current,voltage,phase,location,groupId]] + """ + for line in lines: + ps = line.split(",") + table_name = ps[0] + value = '(' + ",".join(ps[1:-2]) + ') ' + if table_name in self._tb_values: + self._tb_values[table_name] += value + else: + self._tb_values[table_name] = value + + if table_name not in self._tb_tags: + location = ps[-2] + group_id = ps[-1] + tag_value = f"('{location}',{group_id})" + self._tb_tags[table_name] = tag_value + self.flush() + + def flush(self): + """ + Assemble INSERT statement and execute it. + When the sql length grows close to MAX_SQL_LENGTH, the sql will be executed immediately, and a new INSERT statement will be created. + In case of "Table does not exit" exception, tables in the sql will be created and the sql will be re-executed. + """ + sql = "INSERT INTO " + sql_len = len(sql) + buf = [] + for tb_name, values in self._tb_values.items(): + q = tb_name + " VALUES " + values + if sql_len + len(q) >= self._max_sql_length: + sql += " ".join(buf) + self.execute_sql(sql) + sql = "INSERT INTO " + sql_len = len(sql) + buf = [] + buf.append(q) + sql_len += len(q) + sql += " ".join(buf) + self.execute_sql(sql) + self._tb_values.clear() + + def execute_sql(self, sql): + try: + self._conn.execute(sql) + except taos.Error as e: + error_code = e.errno & 0xffff + # Table does not exit + if error_code == 9731: + self.create_tables() + else: + self.log.error("Execute SQL: %s", sql) + raise e + except BaseException as baseException: + self.log.error("Execute SQL: %s", sql) + raise baseException + + def create_tables(self): + sql = "CREATE TABLE " + for tb in self._tb_values.keys(): + tag_values = self._tb_tags[tb] + sql += "IF NOT EXISTS " + tb + " USING meters TAGS " + tag_values + " " + try: + self._conn.execute(sql) + except BaseException as e: + self.log.error("Execute SQL: %s", sql) + raise e diff --git a/docs/examples/python/subscribe_demo.py b/docs/examples/python/subscribe_demo.py deleted file mode 100644 index db9d49c3f4f8122634800c02a683d4cb022a7ba0..0000000000000000000000000000000000000000 --- a/docs/examples/python/subscribe_demo.py +++ /dev/null @@ -1,38 +0,0 @@ -""" -Python asynchronous subscribe demo. -run on Linux system with: python3 subscribe_demo.py -""" - -from ctypes import c_void_p - -import taos -import time - - -def query_callback(p_sub, p_result, p_param, code): - """ - :param p_sub: pointer returned by native API -- taos_subscribe - :param p_result: pointer to native TAOS_RES - :param p_param: None - :param code: error code - :return: None - """ - print("in callback") - result = taos.TaosResult(c_void_p(p_result)) - # raise exception if error occur - result.check_error(code) - for row in result.rows_iter(): - print(row) - print(f"{result.row_count} rows consumed.") - - -if __name__ == '__main__': - conn = taos.connect() - restart = True - topic = "topic-meter-current-bg" - sql = "select * from power.meters where current > 10" # Error sql - interval = 2000 # consumption interval in microseconds. - _ = conn.subscribe(restart, topic, sql, interval, query_callback) - # Note: we received the return value as _ above, to avoid the TaosSubscription object to be deleted by gc. - while True: - time.sleep(10) # use Ctrl + C to interrupt diff --git a/docs/examples/python/tmq_example.py b/docs/examples/python/tmq_example.py index 1f6da3d1b6690ab12527c1810286ba22a7688851..cee036454ec4d3f4809576a1eee8ac054fcba056 100644 --- a/docs/examples/python/tmq_example.py +++ b/docs/examples/python/tmq_example.py @@ -1,59 +1,6 @@ import taos -from taos.tmq import * - -conn = taos.connect() - -# create database -conn.execute("drop database if exists py_tmq") -conn.execute("create database if not exists py_tmq vgroups 2") - -# create table and stables -conn.select_db("py_tmq") -conn.execute("create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)") -conn.execute("create table if not exists tb1 using stb1 tags(1)") -conn.execute("create table if not exists tb2 using stb1 tags(2)") -conn.execute("create table if not exists tb3 using stb1 tags(3)") - -# create topic -conn.execute("drop topic if exists topic_ctb_column") -conn.execute("create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1") - -# set consumer configure options -conf = TaosTmqConf() -conf.set("group.id", "tg2") -conf.set("td.connect.user", "root") -conf.set("td.connect.pass", "taosdata") -conf.set("enable.auto.commit", "true") -conf.set("msg.with.table.name", "true") - -def tmq_commit_cb_print(tmq, resp, offset, param=None): - print(f"commit: {resp}, tmq: {tmq}, offset: {offset}, param: {param}") - -conf.set_auto_commit_cb(tmq_commit_cb_print, None) - -# build consumer -tmq = conf.new_consumer() - -# build topic list -topic_list = TaosTmqList() -topic_list.append("topic_ctb_column") - -# subscribe consumer -tmq.subscribe(topic_list) - -# check subscriptions -sub_list = tmq.subscription() -print("subscribed topics: ",sub_list) - -# start subscribe -while 1: - res = tmq.poll(1000) - if res: - topic = res.get_topic_name() - vg = res.get_vgroup_id() - db = res.get_db_name() - print(f"topic: {topic}\nvgroup id: {vg}\ndb: {db}") - for row in res: - print(row) - tb = res.get_table_name() - print(f"from table: {tb}") +from taos.tmq import TaosConsumer +consumer = TaosConsumer('topic_ctb_column', group_id='vg2') +for msg in consumer: + for row in msg: + print(row) diff --git a/docs/examples/rust/Cargo.toml b/docs/examples/rust/Cargo.toml deleted file mode 100644 index 136d09ffbbbd9c7bc1b876e7bfc630dea0560382..0000000000000000000000000000000000000000 --- a/docs/examples/rust/Cargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[workspace] -members = ["restexample", "nativeexample"] diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md index 64a7d419e1cdf9a0e2617ad65c07be5c29fb7b62..f9127121f35c8cdb9d28e121c20b9b7bb9101625 100644 --- a/docs/zh/01-index.md +++ b/docs/zh/01-index.md @@ -4,22 +4,22 @@ sidebar_label: 文档首页 slug: / --- -TDengine是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库(Time-Series Database, TSDB), 它专为物联网、工业互联网、金融等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一极简的时序数据处理平台。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。 +TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库Time Series Database, TSDB), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。 -TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用TDengine, 无论如何,请您仔细阅读[基本概念](./concept)一章。 +TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论如何,请您仔细阅读[基本概念](./concept)一章。 -如果你是开发者,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要把示例代码拷贝粘贴,针对自己的应用稍作改动,就能跑起来。 +如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。 -我们已经生活在大数据的时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请参考[部署集群](./deployment)一章。 +我们已经生活在大数据时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 Database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请仔细参考[部署集群](./deployment)一章。 -TDengine 采用 SQL 作为其查询语言,大大降低学习成本、降低迁移成本,但同时针对时序数据场景,又做了一些扩展,以支持插值、降采样、时间加权平均等操作。[SQL 手册](./taos-sql)一章详细描述了 SQL 语法、详细列出了各种支持的命令和函数。 +TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移成本,但同时针对时序数据场景,又做了一些扩展,以支持插值、降采样、时间加权平均等操作。[SQL 手册](./taos-sql)一章详细描述了 SQL 语法、详细列出了各种支持的命令和函数。 -如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出,配置参数,怎么监测 TDengine 是否健康运行,怎么提升系统运行的性能,那么请仔细参考[运维指南](./operation)一章。 +如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出、配置参数,如何监测 TDengine 是否健康运行,如何提升系统运行的性能,请仔细参考[运维指南](./operation)一章。 -如果你对 TDengine 外围工具,REST API, 各种编程语言的连接器想做更多详细了解,请看[参考指南](./reference)一章。 +如果你对 TDengine 的外围工具、REST API、各种编程语言的连接器(Connector)想做更多详细了解,请看[参考指南](./reference)一章。 -如果你对 TDengine 内部的架构设计很有兴趣,欢迎仔细阅读[技术内幕](./tdinternal)一章,里面对集群的设计、数据分区、分片、写入、读出、查询、聚合查询的流程都做了详细的介绍。如果你想研读 TDengine 代码甚至贡献代码,请一定仔细读完这一章。 +如果你对 TDengine 的内部架构设计很有兴趣,欢迎仔细阅读[技术内幕](./tdinternal)一章,里面对集群的设计、数据分区、分片、写入、读出、查询、聚合查询的流程都做了详细的介绍。如果你想研读 TDengine 代码甚至贡献代码,请一定仔细读完这一章。 -最后,作为一个开源软件,欢迎大家的参与。如果发现文档的任何错误,描述不清晰的地方,都请在每个页面的最下方,点击“编辑本文档“直接进行修改。 +最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。 Together, we make a difference! diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md index 97322c68a2cf40205e388f4f135192f8e0b1b095..012c49d2c3c82d5865eb2d8e76f37bb0f0f69e8b 100644 --- a/docs/zh/02-intro.md +++ b/docs/zh/02-intro.md @@ -1,74 +1,98 @@ --- title: 产品简介 +description: 简要介绍 TDengine 的主要功能 toc_max_heading_level: 2 --- -TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/tdengine/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time-Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。 +TDengine 是一款开源、高性能、云原生的[时序数据库](https://tdengine.com/tsdb/),且针对物联网、车联网、工业互联网、金融、IT 运维等场景进行了优化。TDengine 的代码,包括集群功能,都在 GNU AGPL v3.0 下开源。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等其它功能以降低系统复杂度及研发和运维成本。 -本章节介绍TDengine的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对TDengine有个整体的了解。 +本章节介绍 TDengine 的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对 TDengine 有个整体的了解。 ## 主要功能 -TDengine的主要功能如下: - -1. 高速数据写入,除 [SQL 写入](../develop/insert-data/sql-writing)外,还支持 [Schemaless 写入](../reference/schemaless/),支持 [InfluxDB LINE 协议](../develop/insert-data/influxdb-line),[OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](../develop/insert-data/opentsdb-json)等协议写入; -2. 第三方数据采集工具 [Telegraf](../third-party/telegraf),[Prometheus](../third-party/prometheus),[StatsD](../third-party/statsd),[collectd](../third-party/collectd),[icinga2](../third-party/icinga2), [TCollector](../third-party/tcollector), [EMQ](../third-party/emq-broker), [HiveMQ](../third-party/hive-mq-broker) 等都可以进行配置后,不用任何代码,即可将数据写入; -3. 支持[各种查询](../develop/query-data),包括聚合查询、嵌套查询、降采样查询、插值等 -4. 支持[用户自定义函数](../develop/udf) -5. 支持[缓存](../develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis -6. 支持[流式计算](../develop/stream)(Stream Processing) -7. 支持[数据订阅](../develop/tmq),而且可以指定过滤条件 -8. 支持[集群](../deployment/),可以通过多节点进行水平扩展,并通过多副本实现高可靠 -9. 提供[命令行程序](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询 -10. 提供多种数据的[导入](../operation/import)、[导出](../operation/export) -11. 支持对[TDengine 集群本身的监控](../operation/monitor) -12. 提供 [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) 等多种编程语言的[连接器](../reference/connector/) -13. 支持 [REST 接口](../reference/rest-api/) -14. 支持与[ Grafana 无缝集成](../third-party/grafana) -15. 支持与 Google Data Studio 无缝集成 -16. 支持 [Kubernetes 部署](../deployment/k8s) - -更多细小的功能,请阅读整个文档。 +TDengine 的主要功能如下: + +1. 写入数据,支持 + - [SQL 写入](../develop/insert-data/sql-writing) + - [无模式(Schemaless)写入](../reference/schemaless/),支持多种标准写入协议 + - [InfluxDB Line 协议](../develop/insert-data/influxdb-line) + - [OpenTSDB Telnet 协议](../develop/insert-data/opentsdb-telnet) + - [OpenTSDB JSON 协议](../develop/insert-data/opentsdb-json) + - 与多种第三方工具的无缝集成,它们都可以仅通过配置而无需任何代码即可将数据写入 TDengine + - [Telegraf](../third-party/telegraf) + - [Prometheus](../third-party/prometheus) + - [StatsD](../third-party/statsd) + - [collectd](../third-party/collectd) + - [Icinga2](../third-party/icinga2) + - [TCollector](../third-party/tcollector) + - [EMQX](../third-party/emq-broker) + - [HiveMQ](../third-party/hive-mq-broker) ; +2. 查询数据,支持 + - [标准 SQL](../taos-sql),含嵌套查询 + - [时序数据特色函数](../taos-sql/function/#time-series-extensions) + - [时序数据特色查询](../taos-sql/distinguished),例如降采样、插值、累加和、时间加权平均、状态窗口、会话窗口等 + - [用户自定义函数(UDF)](../taos-sql/udf) +3. [缓存](../develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis 就能对时序数据进行高效处理 +4. [流式计算(Stream Processing)](../develop/stream),TDengine 不仅支持连续查询,还支持事件驱动的流式计算,这样在处理时序数据时就无需 Flink 或 Spark 这样流式计算组件 +5. [数据订阅](../develop/tmq),应用程序可以订阅一张表或一组表的数据,提供与 Kafka 相同的 API,而且可以指定过滤条件 +6. 可视化 + - 支持与 [Grafana](../third-party/grafana/) 的无缝集成 + - 支持与 Google Data Studio 的无缝集成 +7. 集群 + - [集群部署](../deployment/),可以通过增加节点进行水平扩展以提升处理能力 + - 可以通过 [Kubernetes 部署 TDengine](../deployment/k8s/) + - 通过多副本提供高可用能力 +8. 管理 + - [监控](../operation/monitor)运行中的 TDengine 实例 + - 多种[数据导入](../operation/import)方式 + - 多种[数据导出](../operation/export)方式 +9. 工具 + - 提供[交互式命令行程序(CLI)](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询 + - 提供压力测试工具[taosBenchmark](../reference/taosbenchmark),用于测试 TDengine 的性能 +10. 编程 + - 提供各种语言的[连接器(Connector)](../connector): 如 [C/C++](../connector/cpp)、[Java](../connector/java)、[Go](../connector/go)、[Node.js](../connector/node)、[Rust](../connector/rust)、[Python](../connector/python)、[C#](../connector/csharp) 等 + - 支持 [REST 接口](../connector/rest-api/) + +更多细节功能,请阅读整个文档。 ## 竞争优势 -由于 TDengine 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html),比如结构化、无需事务、很少删除或更新、写多读少等等,设计了全新的针对时序数据的存储引擎和计算引擎,因此与其他时序数据库相比,TDengine 有以下特点: +由于 TDengine 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html),比如结构化、无需事务、很少删除或更新、写多读少等等,因此与其他时序数据库相比,TDengine 有以下特点: -- **[高性能](https://www.taosdata.com/tdengine/fast)**:通过创新的存储引擎设计,无论是数据写入还是查询,TDengine 的性能比通用数据库快 10 倍以上,也远超其他时序数据库,存储空间不及通用数据库的1/10。 +- **[高性能](https://www.taosdata.com/tdengine/fast)**:TDengine 是唯一一个解决了时序数据存储的高基数难题的时序数据库,支持上亿数据采集点,并在数据插入、查询和数据压缩上远胜其它时序数据库。 -- **[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**:通过原生分布式的设计,充分利用云平台的优势,TDengine 提供了水平扩展能力,具备弹性、韧性和可观测性,支持k8s部署,可运行在公有云、私有云和混合云上。 +- **[极简时序数据平台](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**:TDengine 内建缓存、流式计算和数据订阅等功能,为时序数据的处理提供了极简的解决方案,从而大幅降低了业务系统的设计复杂度和运维成本。 -- **[极简时序数据平台](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**:TDengine 内建消息队列、缓存、流式计算等功能,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低系统的复杂度,降低应用开发和运营成本。 +- **[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**:通过原生的分布式设计、数据分片和分区、存算分离、RAFT 协议、Kubernetes 部署和完整的可观测性,TDengine 是一款云原生时序数据库并且能够部署在公有云、私有云和混合云上。 -- **[分析能力](https://www.taosdata.com/tdengine/easy_data_analytics)**:支持 SQL,同时为时序数据特有的分析提供SQL扩展。通过超级表、存储计算分离、分区分片、预计算、自定义函数等技术,TDengine 具备强大的分析能力。 +- **[简单易用](https://www.taosdata.com/tdengine/ease_of_use)**:对系统管理员来说,TDengine 大幅降低了管理和维护的代价。对开发者来说, TDengine 提供了简单的接口、极简的解决方案和与第三方工具的无缝集成。对数据分析专家来说,TDengine 提供了便捷的数据访问能力。 -- **[简单易用](https://www.taosdata.com/tdengine/ease_of_use)**:无任何依赖,安装、集群几秒搞定;提供REST以及各种语言连接器,与众多第三方工具无缝集成;提供命令行程序,便于管理和即席查询;提供各种运维工具。 +- **[分析能力](https://www.taosdata.com/tdengine/easy_data_analytics)**:通过超级表、存储计算分离、分区分片、预计算和其它技术,TDengine 能够高效地浏览、格式化和访问数据。 -- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**:TDengine 的核心代码包括集群功能全部开源,截止到2022年8月1日,全球超过 135.9k 个运行实例,GitHub Star 18.7k,Fork 4.4k,社区活跃。 +- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**:TDengine 的核心代码包括集群功能全部在开源协议下公开。全球超过 140k 个运行实例,GitHub Star 19k,且拥有一个活跃的开发者社区。 采用 TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。表现在几个方面: -1. 由于其超强性能,它能将系统需要的计算资源和存储资源大幅降低 +1. 由于其超强性能,它能将系统所需的计算资源和存储资源大幅降低 2. 因为支持 SQL,能与众多第三方软件无缝集成,学习迁移成本大幅下降 -3. 因为是一极简的时序数据平台,系统复杂度、研发和运营成本大幅降低 -4. 因为维护简单,运营维护成本能大幅降低 +3. 因为是一款极简的时序数据平台,系统复杂度、研发和运营成本大幅降低 ## 技术生态 -在整个时序大数据平台中,TDengine 在其中扮演的角色如下: +在整个时序大数据平台中,TDengine 扮演的角色如下:
    ![TDengine Database 技术生态图](eco_system.webp) +
    图 1. TDengine技术生态图
    -
    图 1. TDengine技术生态图
    -上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka, 他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序 (CLI) 以及可视化管理管理。 +上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka,他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序(CLI)以及可视化管理工具。 -## 总体适用场景 +## 典型适用场景 -作为一个高性能、分布式、支持 SQL 的时序数据库 (Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。本文对适用场景做更多详细的分析。 +作为一个高性能、分布式、支持 SQL 的时序数据库(Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因其充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。下面本文将对适用场景做更多详细的分析。 ### 数据源特点和需求 @@ -90,18 +114,18 @@ TDengine的主要功能如下: ### 系统功能需求 -| 系统功能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 | -| -------------------------- | ------ | -------- | -------- | --------------------------------------------------------------------------------------------------------------------- | -| 要求完整的内置数据处理算法 | | √ | | TDengine 的实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。 | -| 需要大量的交叉查询处理 | | √ | | 这种类型的处理更多应该用关系型数据系统处理,或者应该考虑 TDengine 和关系型数据系统配合实现系统功能。 | +| 系统功能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 | +| -------------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------------------------- | +| 要求完整的内置数据处理算法 | | √ | | TDengine 实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有需求,因此特殊类型的处理需求还需要在应用层面解决。 | +| 需要大量的交叉查询处理 | | √ | | 这种类型的处理更多应该用关系型数据库处理,或者应该考虑 TDengine 和关系型数据库配合实现系统功能。 | ### 系统性能需求 -| 系统性能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 | -| ---------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------ | -| 要求较大的总体处理能力 | | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。 | -| 要求高速处理数据 | | | √ | TDengine 的专门为 IoT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。 | -| 要求快速处理小粒度数据 | | | √ | 这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。 | +| 系统性能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 | +| ---------------------- | ------ | -------- | -------- | -------------------------------------------------------------------------------------------------- | +| 要求较大的总体处理能力 | | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。 | +| 要求高速处理数据 | | | √ | TDengine 专门为 IoT 优化的存储和数据处理设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。 | +| 要求快速处理小粒度数据 | | | √ | 这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。 | ### 系统维护需求 diff --git a/docs/zh/04-concept/index.md b/docs/zh/04-concept/index.md index 8e97d4a2f43537c1229c8e8ea092ddfc1257dde7..89d3df9c973d9a319397285599e6b2e6be3785de 100644 --- a/docs/zh/04-concept/index.md +++ b/docs/zh/04-concept/index.md @@ -1,5 +1,7 @@ --- +sidebar_label: 基本概念 title: 数据模型和基本概念 +description: TDengine 的数据模型和基本概念 --- 为了便于解释基本概念,便于撰写示例程序,整个 TDengine 文档以智能电表作为典型时序数据场景。假设每个智能电表采集电流、电压、相位三个量,有多个智能电表,每个电表有位置 location 和分组 group ID 的静态属性. 其采集的数据类似如下的表格: @@ -104,15 +106,15 @@ title: 数据模型和基本概念 ## 采集量 (Metric) -采集量是指传感器、设备或其他类型采集点采集的物理量,比如电流、电压、温度、压力、GPS 位置等,是随时间变化的,数据类型可以是整型、浮点型、布尔型,也可是字符串。随着时间的推移,存储的采集量的数据量越来越大。 +采集量是指传感器、设备或其他类型采集点采集的物理量,比如电流、电压、温度、压力、GPS 位置等,是随时间变化的,数据类型可以是整型、浮点型、布尔型,也可是字符串。随着时间的推移,存储的采集量的数据量越来越大。智能电表示例中的电流、电压、相位就是采集量。 ## 标签 (Label/Tag) -标签是指传感器、设备或其他类型采集点的静态属性,不是随时间变化的,比如设备型号、颜色、设备的所在地等,数据类型可以是任何类型。虽然是静态的,但 TDengine 容许用户修改、删除或增加标签值。与采集量不一样的是,随时间的推移,存储的标签的数据量不会有什么变化。 +标签是指传感器、设备或其他类型采集点的静态属性,不是随时间变化的,比如设备型号、颜色、设备的所在地等,数据类型可以是任何类型。虽然是静态的,但 TDengine 容许用户修改、删除或增加标签值。与采集量不一样的是,随时间的推移,存储的标签的数据量不会有什么变化。智能电表示例中的location与groupId就是标签。 ## 数据采集点 (Data Collection Point) -数据采集点是指按照预设时间周期或受事件触发采集物理量的硬件或软件。一个数据采集点可以采集一个或多个采集量,**但这些采集量都是同一时刻采集的,具有相同的时间戳**。对于复杂的设备,往往有多个数据采集点,每个数据采集点采集的周期都可能不一样,而且完全独立,不同步。比如对于一台汽车,有数据采集点专门采集 GPS 位置,有数据采集点专门采集发动机状态,有数据采集点专门采集车内的环境,这样一台汽车就有三个数据采集点。 +数据采集点是指按照预设时间周期或受事件触发采集物理量的硬件或软件。一个数据采集点可以采集一个或多个采集量,**但这些采集量都是同一时刻采集的,具有相同的时间戳**。对于复杂的设备,往往有多个数据采集点,每个数据采集点采集的周期都可能不一样,而且完全独立,不同步。比如对于一台汽车,有数据采集点专门采集 GPS 位置,有数据采集点专门采集发动机状态,有数据采集点专门采集车内的环境,这样一台汽车就有三个数据采集点。智能电表示例中的d1001, d1002, d1003, d1004等就是数据采集点。 ## 表 (Table) @@ -131,13 +133,14 @@ TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表 对于复杂的设备,比如汽车,它有多个数据采集点,那么就需要为一台汽车建立多张表。 + ## 超级表 (STable) 由于一个数据采集点一张表,导致表的数量巨增,难以管理,而且应用经常需要做采集点之间的聚合操作,聚合的操作也变得复杂起来。为解决这个问题,TDengine 引入超级表(Super Table,简称为 STable)的概念。 超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的集合),除需要定义采集量的表结构之外,还需要定义其标签的 schema,标签的数据类型可以是整数、浮点数、字符串,标签可以有多个,可以事后增加、删除或修改。如果整个系统有 N 个不同类型的数据采集点,就需要建立 N 个超级表。 -在 TDengine 的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。 +在 TDengine 的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。智能电表示例中,我们可以创建一个超级表meters. ## 子表 (Subtable) @@ -156,7 +159,9 @@ TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表 查询既可以在表上进行,也可以在超级表上进行。针对超级表的查询,TDengine 将把所有子表中的数据视为一个整体数据集进行处理,会先把满足标签过滤条件的表从超级表中找出来,然后再扫描这些表的时序数据,进行聚合操作,这样需要扫描的数据集会大幅减少,从而显著提高查询的性能。本质上,TDengine 通过对超级表查询的支持,实现了多个同类数据采集点的高效聚合。 -TDengine系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。 +TDengine系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。在智能电表的示例中,我们可以通过超级表meters创建子表d1001, d1002, d1003, d1004等。 + +为了更好地理解超级与子表的关系,可以参考下面关于智能电表数据模型的示意图。 ![智能电表数据模型示意图](./supertable.webp) ## 库 (database) diff --git a/docs/zh/04-concept/supertable.webp b/docs/zh/04-concept/supertable.webp new file mode 100644 index 0000000000000000000000000000000000000000..764b8f3de7ee92a103b2fcd0e75c03773af5ee37 Binary files /dev/null and b/docs/zh/04-concept/supertable.webp differ diff --git a/docs/zh/05-get-started/01-docker.md b/docs/zh/05-get-started/01-docker.md index 814784b649ecbaf32ec93d6b26cb40f2a98e82d8..e2be4195176a3f1ac7712a036d04b60b2fb77718 100644 --- a/docs/zh/05-get-started/01-docker.md +++ b/docs/zh/05-get-started/01-docker.md @@ -1,19 +1,17 @@ --- sidebar_label: Docker title: 通过 Docker 快速体验 TDengine +description: 使用 Docker 快速体验 TDengine 的高效写入和查询 --- -:::info -如果您希望为 TDengine 贡献代码或对内部技术实现感兴趣,请参考[TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装. -::: -本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。 +本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。如果你不熟悉 Docker,请使用[安装包的方式快速体验](../../get-started/package/)。如果您希望为 TDengine 贡献代码或对内部技术实现感兴趣,请参考 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装. ## 启动 TDengine 如果已经安装了 docker, 只需执行下面的命令。 ```shell -docker run -d -p 6030:6030 -p 6041/6041 -p 6043-6049/6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine +docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine ``` 注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。 diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md index 63698aab505a4d8d490f75cfb619ef2e069aaca7..3e0fb056a5913d3a82a473bf879a79e398176075 100644 --- a/docs/zh/05-get-started/03-package.md +++ b/docs/zh/05-get-started/03-package.md @@ -1,101 +1,117 @@ --- sidebar_label: 安装包 title: 使用安装包立即开始 +description: 使用安装包快速体验 TDengine --- import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; +import PkgListV3 from "/components/PkgListV3"; -:::info -如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装. +您可以[用 Docker 立即体验](../../get-started/docker/) TDengine。如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装. -::: +TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。目前 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../connector/rest-api/)。 -在 Linux 系统上,TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。TDengine 也提供 Windows x64 平台的安装包。 +为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。 + +在 Linux 系统上,TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。需要注意的是,rpm 和 deb 包不含 taosdump 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。TDengine 也提供 Windows x64 平台的安装包。 ## 安装 - -可以使用 apt-get 工具从官方仓库安装。 + -**安装包仓库** +1. 从列表中下载获得 deb 安装包; + +2. 进入到安装包所在目录,执行如下的安装命令: ```bash -wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add - -echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list +# 替换为下载的安装包版本 +sudo dpkg -i TDengine-server--Linux-x64.deb ``` -如果安装 Beta 版需要安装包仓库 + -```bash -echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list -``` + -**使用 apt-get 命令安装** +1. 从列表中下载获得 rpm 安装包; + +2. 进入到安装包所在目录,执行如下的安装命令: ```bash -sudo apt-get update -apt-cache policy tdengine -sudo apt-get install tdengine +# 替换为下载的安装包版本 +sudo rpm -ivh TDengine-server--Linux-x64.rpm ``` -:::tip -apt-get 方式只适用于 Debian 或 Ubuntu 系统 -:::: - -1. 从 [发布历史页面](../../releases) 下载获得 deb 安装包,例如 TDengine-server-3.0.0.0-Linux-x64.deb; -2. 进入到 TDengine-server-3.0.0.0-Linux-x64.deb 安装包所在目录,执行如下的安装命令: + + +1. 从列表中下载获得 tar.gz 安装包; + +2. 进入到安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本: ```bash -sudo dpkg -i TDengine-server-3.0.0.0-Linux-x64.deb +# 替换为下载的安装包版本 +tar -zxvf TDengine-server--Linux-x64.tar.gz ``` - - - - -1. 从 [发布历史页面](../../releases) 下载获得 rpm 安装包,例如 TDengine-server-3.0.0.0-Linux-x64.rpm; -2. 进入到 TDengine-server-3.0.0.0-Linux-x64.rpm 安装包所在目录,执行如下的安装命令: +解压后进入相应路径,执行 ```bash -sudo rpm -ivh TDengine-server-3.0.0.0-Linux-x64.rpm +sudo ./install.sh ``` +:::info +install.sh 安装脚本在执行过程中,会通过命令行交互界面询问一些配置信息。如果希望采取无交互安装方式,那么可以用 -e no 参数来执行 install.sh 脚本。运行 `./install.sh -h` 指令可以查看所有参数的详细说明信息。 +::: + - + +可以使用 apt-get 工具从官方仓库安装。 -1. 从 [发布历史页面](../../releases) 下载获得 tar.gz 安装包,例如 TDengine-server-3.0.0.0-Linux-x64.tar.gz; -2. 进入到 TDengine-server-3.0.0.0-Linux-x64.tar.gz 安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本: +**安装包仓库** ```bash -tar -zxvf TDengine-server-3.0.0.0-Linux-x64.tar.gz +wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add - +echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list ``` -解压后进入相应路径,执行 +如果安装 Beta 版需要安装包仓库 ```bash -sudo ./install.sh +wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add - +echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list ``` -:::info -install.sh 安装脚本在执行过程中,会通过命令行交互界面询问一些配置信息。如果希望采取无交互安装方式,那么可以用 -e no 参数来执行 install.sh 脚本。运行 `./install.sh -h` 指令可以查看所有参数的详细说明信息。 +**使用 apt-get 命令安装** -::: +```bash +sudo apt-get update +apt-cache policy tdengine +sudo apt-get install tdengine +``` +:::tip +apt-get 方式只适用于 Debian 或 Ubuntu 系统 +:::: + - +注意:目前 TDengine 在 Windows 平台上只支持 Windows server 2016/2019 和 Windows 10/11 系统版本。 -1. 从 [发布历史页面](../../releases) 下载获得 exe 安装程序,例如 TDengine-server-3.0.0.0-Windows-x64.exe; -2. 运行 TDengine-server-3.0.0.0-Windows-x64.exe 来安装 TDengine。 +1. 从列表中下载获得 exe 安装程序; + +2. 运行可执行程序来安装 TDengine。 +:::info +下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases/tdengine) +::: + :::note 当安装第一个节点时,出现 Enter FQDN:提示的时候,不需要输入任何内容。只有当安装第二个或以后更多的节点时,才需要输入已有集群中任何一个可用节点的 FQDN,支持该新节点加入集群。当然也可以不输入,而是在新节点启动前,配置到新节点的配置文件中。 @@ -193,7 +209,7 @@ Query OK, 2 row(s) in set (0.003128s) ## 使用 taosBenchmark 体验写入速度 -启动 TDengine 的服务,在 Linux 终端执行 `taosBenchmark` (曾命名为 `taosdemo`): +启动 TDengine 的服务,在 Linux 或 windows 终端执行 `taosBenchmark` (曾命名为 `taosdemo`): ```bash taosBenchmark diff --git a/docs/zh/05-get-started/_pkg_install.mdx b/docs/zh/05-get-started/_pkg_install.mdx deleted file mode 100644 index 83c987af8bcf24a9593105b680d32a0421344d5f..0000000000000000000000000000000000000000 --- a/docs/zh/05-get-started/_pkg_install.mdx +++ /dev/null @@ -1,17 +0,0 @@ -import PkgList from "/components/PkgList"; - -TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟。 - -为方便使用,从 2.4.0.10 开始,标准的服务端安装包包含了 taos、taosd、taosAdapter、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。 - -在安装包格式上,我们提供 tar.gz, rpm 和 deb 格式,为企业客户提供 tar.gz 格式安装包,以方便在特定操作系统上使用。需要注意的是,rpm 和 deb 包不含 taosdump、taosBenchmark 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。 - -发布版本包括稳定版和 Beta 版,Beta 版含有更多新功能。正式上线或测试建议安装稳定版。您可以根据需要选择下载: - - - -具体的安装方法,请参见[安装包的安装和卸载](/operation/pkg-install)。 - -下载其他组件、最新 Beta 版及之前版本的安装包,请点击[这里](https://www.taosdata.com/all-downloads) - -查看 Release Notes, 请点击[这里](https://github.com/taosdata/TDengine/releases) diff --git a/docs/zh/05-get-started/index.md b/docs/zh/05-get-started/index.md index 794081b4e4c438dee2d8cbe125de4094056f190f..20f8235d87426f7a98ded2f7be431289ea00a045 100644 --- a/docs/zh/05-get-started/index.md +++ b/docs/zh/05-get-started/index.md @@ -3,7 +3,7 @@ title: 立即开始 description: '快速设置 TDengine 环境并体验其高效写入和查询' --- -TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](/reference/taosadapter) 提供 [RESTful 接口](/reference/rest-api)。 +TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../reference/taosadapter) 提供 [RESTful 接口](../connector/rest-api)。 本章主要介绍如何利用 Docker 或者安装包快速设置 TDengine 环境并体验其高效写入和查询。 diff --git a/docs/zh/07-develop/01-connect/_connect_java.mdx b/docs/zh/07-develop/01-connect/_connect_java.mdx index f5b8ea1cc2bf309bbb182be6ae06100102328a16..86c70ef7dc9a84d61fa36502f83e0be6a0836214 100644 --- a/docs/zh/07-develop/01-connect/_connect_java.mdx +++ b/docs/zh/07-develop/01-connect/_connect_java.mdx @@ -12,4 +12,4 @@ {{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}} ``` -更多连接参数配置,参考[Java 连接器](/reference/connector/java) +更多连接参数配置,参考[Java 连接器](../../connector/java) diff --git a/docs/zh/07-develop/01-connect/index.md b/docs/zh/07-develop/01-connect/index.md index 89faf812fffa281cc1c9df2371d3470252231ce2..075d99cfee78b01b66ebc527892e90b9291dd422 100644 --- a/docs/zh/07-develop/01-connect/index.md +++ b/docs/zh/07-develop/01-connect/index.md @@ -1,6 +1,6 @@ --- title: 建立连接 -description: "本节介绍如何使用连接器建立与 TDengine 的连接,给出连接器安装、连接的简单说明。" +description: 使用连接器建立与 TDengine 的连接,以及连接器的安装和连接 --- import Tabs from "@theme/Tabs"; @@ -14,10 +14,10 @@ import ConnCSNative from "./_connect_cs.mdx"; import ConnC from "./_connect_c.mdx"; import ConnR from "./_connect_r.mdx"; import ConnPHP from "./_connect_php.mdx"; -import InstallOnWindows from "../../14-reference/03-connector/_linux_install.mdx"; -import InstallOnLinux from "../../14-reference/03-connector/_windows_install.mdx"; -import VerifyLinux from "../../14-reference/03-connector/_verify_linux.mdx"; -import VerifyWindows from "../../14-reference/03-connector/_verify_windows.mdx"; +import InstallOnWindows from "../../08-connector/_linux_install.mdx"; +import InstallOnLinux from "../../08-connector/_windows_install.mdx"; +import VerifyLinux from "../../08-connector/_verify_linux.mdx"; +import VerifyWindows from "../../08-connector/_verify_windows.mdx"; TDengine 提供了丰富的应用程序开发接口,为了便于用户快速开发自己的应用,TDengine 支持了多种编程语言的连接器,其中官方连接器包括支持 C/C++、Java、Python、Go、Node.js、C#、Rust、Lua(社区贡献)和 PHP (社区贡献)的连接器。这些连接器支持使用原生接口(taosc)和 REST 接口(部分语言暂不支持)连接 TDengine 集群。社区开发者也贡献了多个非官方连接器,例如 ADO.NET 连接器、Lua 连接器和 PHP 连接器。 @@ -33,7 +33,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速 关键不同点在于: 1. 使用 REST 连接,用户无需安装客户端驱动程序 taosc,具有跨平台易用的优势,但性能要下降 30%左右。 -2. 使用原生连接可以体验 TDengine 的全部功能,如[参数绑定接口](/reference/connector/cpp#参数绑定-api)、[订阅](/reference/connector/cpp#订阅和消费-api)等等。 +2. 使用原生连接可以体验 TDengine 的全部功能,如[参数绑定接口](../../connector/cpp/#参数绑定-api)、[订阅](../../connector/cpp/#订阅和消费-api)等等。 ## 安装客户端驱动 taosc @@ -223,7 +223,7 @@ phpize && ./configure && make -j && make install **手动指定 TDengine 目录:** ```shell -phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install +phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install ``` > `--with-tdengine-dir=` 后跟上 TDengine 目录。 diff --git a/docs/zh/07-develop/02-model/index.mdx b/docs/zh/07-develop/02-model/index.mdx index be545e8813b26b3abbb22d8231590a909e935a83..d66059c2cda2a0e4629b16ca44cee036dc67546f 100644 --- a/docs/zh/07-develop/02-model/index.mdx +++ b/docs/zh/07-develop/02-model/index.mdx @@ -1,5 +1,7 @@ --- +sidebar_label: 数据建模 title: TDengine 数据建模 +description: TDengine 中如何建立数据模型 --- TDengine 采用类关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库、超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。 @@ -11,10 +13,10 @@ TDengine 采用类关系型数据模型,需要建库、建表。因此对于 不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为了在各种场景下 TDengine 都能最大效率的工作,TDengine 建议将不同数据特征的表创建在不同的库里,因为每个库可以配置不同的存储策略。创建一个库时,除 SQL 标准的选项外,还可以指定保留时长、副本数、缓存大小、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。比如: ```sql -CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 VGROUPS 100 WAL 1; +CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 WAL_LEVEL 1; ``` -上述语句将创建一个名为 power 的库,这个库的数据将保留 365 天(超过 365 天将被自动删除),每 10 天一个数据文件,每个 VNODE 的写入内存池的大小为 16 MB,数据库的 VGROUPS 数量,对该数据库入会写 WAL 但不执行 FSYNC。详细的语法及参数请见 [数据库管理](/taos-sql/database) 章节。 +上述语句将创建一个名为 power 的库,这个库的数据将保留 365 天(超过 365 天将被自动删除),每 10 天一个数据文件,每个 VNODE 的写入内存池的大小为 16 MB,对该数据库入会写 WAL 但不执行 FSYNC。详细的语法及参数请见 [数据库管理](/taos-sql/database) 章节。 创建库之后,需要使用 SQL 命令 `USE` 将当前库切换过来,例如: @@ -39,7 +41,7 @@ USE power; CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int); ``` -与创建普通表一样,创建超级表时,需要提供表名(示例中为 meters),表结构 Schema,即数据列的定义。第一列必须为时间戳(示例中为 ts),其他列为采集的物理量(示例中为 current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的 schema (示例中为 location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组 ID、管理员 ID 等等。标签的 schema 可以事后增加、删除、修改。具体定义以及细节请见 [TAOS SQL 的超级表管理](/taos-sql/stable) 章节。 +与创建普通表一样,创建超级表时,需要提供表名(示例中为 meters),表结构 Schema,即数据列的定义。第一列必须为时间戳(示例中为 ts),其他列为采集的物理量(示例中为 current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的 schema (示例中为 location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组 ID、管理员 ID 等等。标签的 schema 可以事后增加、删除、修改。具体定义以及细节请见 [TDengine SQL 的超级表管理](/taos-sql/stable) 章节。 每一种类型的数据采集点需要建立一个超级表,因此一个物联网系统,往往会有多个超级表。对于电网,我们就需要对智能电表、变压器、母线、开关等都建立一个超级表。在物联网中,一个设备就可能有多个数据采集点(比如一台风力发电的风机,有的采集点采集电流、电压等电参数,有的采集点采集温度、湿度、风向等环境参数),这个时候,对这一类型的设备,需要建立多张超级表。 @@ -53,7 +55,7 @@ TDengine 对每个数据采集点需要独立建表。与标准的关系型数 CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2); ``` -其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 "California.SanFrancisco",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TAOS SQL 的表管理](/taos-sql/table) 章节。 +其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 "California.SanFrancisco",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TDengine SQL 的表管理](/taos-sql/table) 章节。 TDengine 建议将数据采集点的全局唯一 ID 作为表名(比如设备序列号)。但对于有的场景,并没有唯一的 ID,可以将多个 ID 组合成一个唯一的 ID。不建议将具有唯一性的 ID 作为标签值。 diff --git a/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx b/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx index 214cbdaa96d02e0cd1251eeda97c6a897887cc7e..8818eaae3dc1806a00e73d9846fbd1dfe15e0c8a 100644 --- a/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx +++ b/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx @@ -23,9 +23,10 @@ import PhpStmt from "./_php_stmt.mdx"; ## SQL 写入简介 -应用通过连接器执行 INSERT 语句来插入数据,用户还可以通过 TAOS Shell,手动输入 INSERT 语句插入数据。 +应用通过连接器执行 INSERT 语句来插入数据,用户还可以通过 TDengine CLI,手动输入 INSERT 语句插入数据。 ### 一次写入一条 + 下面这条 INSERT 就将一条记录写入到表 d1001 中: ```sql @@ -48,7 +49,7 @@ TDengine 也支持一次向多个表写入数据,比如下面这条命令就 INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31); ``` -详细的 SQL INSERT 语法规则参考 [TAOS SQL 的数据写入](/taos-sql/insert)。 +详细的 SQL INSERT 语法规则参考 [TDengine SQL 的数据写入](/taos-sql/insert)。 :::info @@ -134,4 +135,3 @@ TDengine 也提供了支持参数绑定的 Prepare API,与 MySQL 类似,这
    - diff --git a/docs/zh/07-develop/03-insert-data/05-high-volume.md b/docs/zh/07-develop/03-insert-data/05-high-volume.md new file mode 100644 index 0000000000000000000000000000000000000000..d7581467ae0315442d89de395d35bbd677f75d3a --- /dev/null +++ b/docs/zh/07-develop/03-insert-data/05-high-volume.md @@ -0,0 +1,436 @@ +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +# 高效写入 + +本节介绍如何高效地向 TDengine 写入数据。 + +## 高效写入原理 {#principle} + +### 客户端程序的角度 {#application-view} + +从客户端程序的角度来说,高效写入数据要考虑以下几个因素: + +1. 单次写入的数据量。一般来讲,每批次写入的数据量越大越高效(但超过一定阈值其优势会消失)。使用 SQL 写入 TDengine 时,尽量在一条 SQL 中拼接更多数据。目前,TDengine 支持的一条 SQL 的最大长度为 1,048,576(1M)个字符。 +2. 并发连接数。一般来讲,同时写入数据的并发连接数越多写入越高效(但超过一定阈值反而会下降,取决于服务端处理能力)。 +3. 数据在不同表(或子表)之间的分布,即要写入数据的相邻性。一般来说,每批次只向同一张表(或子表)写入数据比向多张表(或子表)写入数据要更高效; +4. 写入方式。一般来讲: + - 参数绑定写入比 SQL 写入更高效。因参数绑定方式避免了 SQL 解析。(但增加了 C 接口的调用次数,对于连接器也有性能损耗)。 + - SQL 写入不自动建表比自动建表更高效。因自动建表要频繁检查表是否存在 + - SQL 写入比无模式写入更高效。因无模式写入会自动建表且支持动态更改表结构 + +客户端程序要充分且恰当地利用以上几个因素。在单次写入中尽量只向同一张表(或子表)写入数据,每批次写入的数据量经过测试和调优设定为一个最适合当前系统处理能力的数值,并发写入的连接数同样经过测试和调优后设定为一个最适合当前系统处理能力的数值,以实现在当前系统中的最佳写入速度。 + +### 数据源的角度 {#datasource-view} + +客户端程序通常需要从数据源读数据再写入 TDengine。从数据源角度来说,以下几种情况需要在读线程和写线程之间增加队列: + +1. 有多个数据源,单个数据源生成数据的速度远小于单线程写入的速度,但数据量整体比较大。此时队列的作用是把多个数据源的数据汇聚到一起,增加单次写入的数据量。 +2. 单个数据源生成数据的速度远大于单线程写入的速度。此时队列的作用是增加写入的并发度。 +3. 单张表的数据分散在多个数据源。此时队列的作用是将同一张表的数据提前汇聚到一起,提高写入时数据的相邻性。 + +如果写应用的数据源是 Kafka, 写应用本身即 Kafka 的消费者,则可利用 Kafka 的特性实现高效写入。比如: + +1. 将同一张表的数据写到同一个 Topic 的同一个 Partition,增加数据的相邻性 +2. 通过订阅多个 Topic 实现数据汇聚 +3. 通过增加 Consumer 线程数增加写入的并发度 +4. 通过增加每次 fetch 的最大数据量来增加单次写入的最大数据量 + +### 服务器配置的角度 {#setting-view} + +从服务端配置的角度,要根据系统中磁盘的数量,磁盘的 I/O 能力,以及处理器能力在创建数据库时设置适当的 vgroups 数量以充分发挥系统性能。如果 vgroups 过少,则系统性能无法发挥;如果 vgroups 过多,会造成无谓的资源竞争。常规推荐 vgroups 数量为 CPU 核数的 2 倍,但仍然要结合具体的系统资源配置进行调优。 + +更多调优参数,请参考 [数据库管理](../../../taos-sql/database) 和 [服务端配置](../../../reference/config)。 + +## 高效写入示例 {#sample-code} + +### 场景设计 {#scenario} + +下面的示例程序展示了如何高效写入数据,场景设计如下: + +- TDengine 客户端程序从其它数据源不断读入数据,在示例程序中采用生成模拟数据的方式来模拟读取数据源 +- 单个连接向 TDengine 写入的速度无法与读数据的速度相匹配,因此客户端程序启动多个线程,每个线程都建立了与 TDengine 的连接,每个线程都有一个独占的固定大小的消息队列 +- 客户端程序将接收到的数据根据所属的表名(或子表名)HASH 到不同的线程,即写入该线程所对应的消息队列,以此确保属于某个表(或子表)的数据一定会被一个固定的线程处理 +- 各个子线程在将所关联的消息队列中的数据读空后或者读取数据量达到一个预定的阈值后将该批数据写入 TDengine,并继续处理后面接收到的数据 + +![TDengine 高效写入示例场景的线程模型](highvolume.webp) + +### 示例代码 {#code} + +这一部分是针对以上场景的示例代码。对于其它场景高效写入原理相同,不过代码需要适当修改。 + +本示例代码假设源数据属于同一张超级表(meters)的不同子表。程序在开始写入数据之前已经在 test 库创建了这个超级表。对于子表,将根据收到的数据,由应用程序自动创建。如果实际场景是多个超级表,只需修改写任务自动建表的代码。 + + + + +**程序清单** + +| 类名 | 功能说明 | +| ---------------- | --------------------------------------------------------------------------- | +| FastWriteExample | 主程序 | +| ReadTask | 从模拟源中读取数据,将表名经过 hash 后得到 Queue 的 index,写入对应的 Queue | +| WriteTask | 从 Queue 中获取数据,组成一个 Batch,写入 TDengine | +| MockDataSource | 模拟生成一定数量 meters 子表的数据 | +| SQLWriter | WriteTask 依赖这个类完成 SQL 拼接、自动建表、 SQL 写入、SQL 长度检查 | +| StmtWriter | 实现参数绑定方式批量写入(暂未完成) | +| DataBaseMonitor | 统计写入速度,并每隔 10 秒把当前写入速度打印到控制台 | + + +以下是各类的完整代码和更详细的功能说明。 + +
    +FastWriteExample +主程序负责: + +1. 创建消息队列 +2. 启动写线程 +3. 启动读线程 +4. 每隔 10 秒统计一次写入速度 + +主程序默认暴露了 4 个参数,每次启动程序都可调节,用于测试和调优: + +1. 读线程个数。默认为 1。 +2. 写线程个数。默认为 3。 +3. 模拟生成的总表数。默认为 1000。将会平分给各个读线程。如果总表数较大,建表需要花费较长,开始统计的写入速度可能较慢。 +4. 每批最多写入记录数量。默认为 3000。 + +队列容量(taskQueueCapacity)也是与性能有关的参数,可通过修改程序调节。一般来讲,队列容量越大,入队被阻塞的概率越小,队列的吞吐量越大,但是内存占用也会越大。 示例程序默认值已经设置地足够大。 + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java}} +``` + +
    + +
    +ReadTask + +读任务负责从数据源读数据。每个读任务都关联了一个模拟数据源。每个模拟数据源可生成一点数量表的数据。不同的模拟数据源生成不同表的数据。 + +读任务采用阻塞的方式写消息队列。也就是说,一旦队列满了,写操作就会阻塞。 + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java}} +``` + +
    + +
    +WriteTask + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java}} +``` + +
    + +
    + +MockDataSource + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java}} +``` + +
    + +
    + +SQLWriter + +SQLWriter 类封装了拼 SQL 和写数据的逻辑。注意,所有的表都没有提前创建,而是在 catch 到表不存在异常的时候,再以超级表为模板批量建表,然后重新执行 INSERT 语句。对于其它异常,这里简单地记录当时执行的 SQL 语句到日志中,你也可以记录更多线索到日志,已便排查错误和故障恢复。 + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java}} +``` + +
    + +
    + +DataBaseMonitor + +```java +{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java}} +``` + +
    + +**执行步骤** + +
    +执行 Java 示例程序 + +执行程序前需配置环境变量 `TDENGINE_JDBC_URL`。如果 TDengine Server 部署在本机,且用户名、密码和端口都是默认值,那么可配置: + +``` +TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata" +``` + +**本地集成开发环境执行示例程序** + +1. clone TDengine 仓库 + ``` + git clone git@github.com:taosdata/TDengine.git --depth 1 + ``` +2. 用集成开发环境打开 `docs/examples/java` 目录。 +3. 在开发环境中配置环境变量 `TDENGINE_JDBC_URL`。如果已配置了全局的环境变量 `TDENGINE_JDBC_URL` 可跳过这一步。 +4. 运行类 `com.taos.example.highvolume.FastWriteExample`。 + +**远程服务器上执行示例程序** + +若要在服务器上执行示例程序,可按照下面的步骤操作: + +1. 打包示例代码。在目录 TDengine/docs/examples/java 下执行: + ``` + mvn package + ``` +2. 远程服务器上创建 examples 目录: + ``` + mkdir -p examples/java + ``` +3. 复制依赖到服务器指定目录: + - 复制依赖包,只用复制一次 + ``` + scp -r .\target\lib @:~/examples/java + ``` + - 复制本程序的 jar 包,每次更新代码都需要复制 + ``` + scp -r .\target\javaexample-1.0.jar @:~/examples/java + ``` +4. 配置环境变量。 + 编辑 `~/.bash_profile` 或 `~/.bashrc` 添加如下内容例如: + + ``` + export TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata" + ``` + + 以上使用的是本地部署 TDengine Server 时默认的 JDBC URL。你需要根据自己的实际情况更改。 + +5. 用 java 命令启动示例程序,命令模板: + + ``` + java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample + ``` + +6. 结束测试程序。测试程序不会自动结束,在获取到当前配置下稳定的写入速度后,按 CTRL + C 结束程序。 + 下面是一次实际运行的日志输出,机器配置 16核 + 64G + 固态硬盘。 + + ``` + root@vm85$ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample 2 12 + 18:56:35.896 [main] INFO c.t.e.highvolume.FastWriteExample - readTaskCount=2, writeTaskCount=12 tableCount=1000 maxBatchSize=3000 + 18:56:36.011 [WriteThread-0] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.015 [WriteThread-0] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.021 [WriteThread-1] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.022 [WriteThread-1] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.031 [WriteThread-2] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.032 [WriteThread-2] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.041 [WriteThread-3] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.042 [WriteThread-3] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.093 [WriteThread-4] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.094 [WriteThread-4] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.099 [WriteThread-5] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.100 [WriteThread-5] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.100 [WriteThread-6] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.101 [WriteThread-6] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.103 [WriteThread-7] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.104 [WriteThread-7] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.105 [WriteThread-8] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.107 [WriteThread-8] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.108 [WriteThread-9] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.109 [WriteThread-9] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.156 [WriteThread-10] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.157 [WriteThread-11] INFO c.taos.example.highvolume.WriteTask - started + 18:56:36.158 [WriteThread-10] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:36.158 [ReadThread-0] INFO com.taos.example.highvolume.ReadTask - started + 18:56:36.158 [ReadThread-1] INFO com.taos.example.highvolume.ReadTask - started + 18:56:36.158 [WriteThread-11] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576 + 18:56:46.369 [main] INFO c.t.e.highvolume.FastWriteExample - count=18554448 speed=1855444 + 18:56:56.946 [main] INFO c.t.e.highvolume.FastWriteExample - count=39059660 speed=2050521 + 18:57:07.322 [main] INFO c.t.e.highvolume.FastWriteExample - count=59403604 speed=2034394 + 18:57:18.032 [main] INFO c.t.e.highvolume.FastWriteExample - count=80262938 speed=2085933 + 18:57:28.432 [main] INFO c.t.e.highvolume.FastWriteExample - count=101139906 speed=2087696 + 18:57:38.921 [main] INFO c.t.e.highvolume.FastWriteExample - count=121807202 speed=2066729 + 18:57:49.375 [main] INFO c.t.e.highvolume.FastWriteExample - count=142952417 speed=2114521 + 18:58:00.689 [main] INFO c.t.e.highvolume.FastWriteExample - count=163650306 speed=2069788 + 18:58:11.646 [main] INFO c.t.e.highvolume.FastWriteExample - count=185019808 speed=2136950 + ``` + +
    + +
    + + +**程序清单** + +Python 示例程序中采用了多进程的架构,并使用了跨进程的消息队列。 + +| 函数或类 | 功能说明 | +| ------------------------ | -------------------------------------------------------------------- | +| main 函数 | 程序入口, 创建各个子进程和消息队列 | +| run_monitor_process 函数 | 创建数据库,超级表,统计写入速度并定时打印到控制台 | +| run_read_task 函数 | 读进程主要逻辑,负责从其它数据系统读数据,并分发数据到为之分配的队列 | +| MockDataSource 类 | 模拟数据源, 实现迭代器接口,每次批量返回每张表的接下来 1000 条数据 | +| run_write_task 函数 | 写进程主要逻辑。每次从队列中取出尽量多的数据,并批量写入 | +| SQLWriter类 | SQL 写入和自动建表 | +| StmtWriter 类 | 实现参数绑定方式批量写入(暂未完成) | + + +
    +main 函数 + +main 函数负责创建消息队列和启动子进程,子进程有 3 类: + +1. 1 个监控进程,负责数据库初始化和统计写入速度 +2. n 个读进程,负责从其它数据系统读数据 +3. m 个写进程,负责写数据库 + +main 函数可以接收 5 个启动参数,依次是: + +1. 读任务(进程)数, 默认为 1 +2. 写任务(进程)数, 默认为 1 +3. 模拟生成的总表数,默认为 1000 +4. 队列大小(单位字节),默认为 1000000 +5. 每批最多写入记录数量, 默认为 3000 + +```python +{{#include docs/examples/python/fast_write_example.py:main}} +``` + +
    + +
    +run_monitor_process + +监控进程负责初始化数据库,并监控当前的写入速度。 + +```python +{{#include docs/examples/python/fast_write_example.py:monitor}} +``` + +
    + +
    + +run_read_task 函数 + +读进程,负责从其它数据系统读数据,并分发数据到为之分配的队列。 + +```python +{{#include docs/examples/python/fast_write_example.py:read}} +``` + +
    + +
    + +MockDataSource + +以下是模拟数据源的实现,我们假设数据源生成的每一条数据都带有目标表名信息。实际中你可能需要一定的规则确定目标表名。 + +```python +{{#include docs/examples/python/mockdatasource.py}} +``` + +
    + +
    +run_write_task 函数 + +写进程每次从队列中取出尽量多的数据,并批量写入。 + +```python +{{#include docs/examples/python/fast_write_example.py:write}} +``` + +
    + +
    + +SQLWriter 类封装了拼 SQL 和写数据的逻辑。所有的表都没有提前创建,而是在发生表不存在错误的时候,再以超级表为模板批量建表,然后重新执行 INSERT 语句。对于其它错误会记录当时执行的 SQL, 以便排查错误和故障恢复。这个类也对 SQL 是否超过最大长度限制做了检查,根据 TDengine 3.0 的限制由输入参数 maxSQLLength 传入了支持的最大 SQL 长度,即 1048576 。 + +SQLWriter + +```python +{{#include docs/examples/python/sql_writer.py}} +``` + +
    + +**执行步骤** + +
    + +执行 Python 示例程序 + +1. 前提条件 + + - 已安装 TDengine 客户端驱动 + - 已安装 Python3, 推荐版本 >= 3.8 + - 已安装 taospy + +2. 安装 faster-fifo 代替 python 内置的 multiprocessing.Queue + + ``` + pip3 install faster-fifo + ``` + +3. 点击上面的“查看源码”链接复制 `fast_write_example.py` 、 `sql_writer.py` 和 `mockdatasource.py` 三个文件。 + +4. 执行示例程序 + + ``` + python3 fast_write_example.py + ``` + + 下面是一次实际运行的输出, 机器配置 16核 + 64G + 固态硬盘。 + + ``` + root@vm85$ python3 fast_write_example.py 8 8 + 2022-07-14 19:13:45,869 [root] - READ_TASK_COUNT=8, WRITE_TASK_COUNT=8, TABLE_COUNT=1000, QUEUE_SIZE=1000000, MAX_BATCH_SIZE=3000 + 2022-07-14 19:13:48,882 [root] - WriteTask-0 started with pid 718347 + 2022-07-14 19:13:48,883 [root] - WriteTask-1 started with pid 718348 + 2022-07-14 19:13:48,884 [root] - WriteTask-2 started with pid 718349 + 2022-07-14 19:13:48,884 [root] - WriteTask-3 started with pid 718350 + 2022-07-14 19:13:48,885 [root] - WriteTask-4 started with pid 718351 + 2022-07-14 19:13:48,885 [root] - WriteTask-5 started with pid 718352 + 2022-07-14 19:13:48,886 [root] - WriteTask-6 started with pid 718353 + 2022-07-14 19:13:48,886 [root] - WriteTask-7 started with pid 718354 + 2022-07-14 19:13:48,887 [root] - ReadTask-0 started with pid 718355 + 2022-07-14 19:13:48,888 [root] - ReadTask-1 started with pid 718356 + 2022-07-14 19:13:48,889 [root] - ReadTask-2 started with pid 718357 + 2022-07-14 19:13:48,889 [root] - ReadTask-3 started with pid 718358 + 2022-07-14 19:13:48,890 [root] - ReadTask-4 started with pid 718359 + 2022-07-14 19:13:48,891 [root] - ReadTask-5 started with pid 718361 + 2022-07-14 19:13:48,892 [root] - ReadTask-6 started with pid 718364 + 2022-07-14 19:13:48,893 [root] - ReadTask-7 started with pid 718365 + 2022-07-14 19:13:56,042 [DataBaseMonitor] - count=6676310 speed=667631.0 + 2022-07-14 19:14:06,196 [DataBaseMonitor] - count=20004310 speed=1332800.0 + 2022-07-14 19:14:16,366 [DataBaseMonitor] - count=32290310 speed=1228600.0 + 2022-07-14 19:14:26,527 [DataBaseMonitor] - count=44438310 speed=1214800.0 + 2022-07-14 19:14:36,673 [DataBaseMonitor] - count=56608310 speed=1217000.0 + 2022-07-14 19:14:46,834 [DataBaseMonitor] - count=68757310 speed=1214900.0 + 2022-07-14 19:14:57,280 [DataBaseMonitor] - count=80992310 speed=1223500.0 + 2022-07-14 19:15:07,689 [DataBaseMonitor] - count=93805310 speed=1281300.0 + 2022-07-14 19:15:18,020 [DataBaseMonitor] - count=106111310 speed=1230600.0 + 2022-07-14 19:15:28,356 [DataBaseMonitor] - count=118394310 speed=1228300.0 + 2022-07-14 19:15:38,690 [DataBaseMonitor] - count=130742310 speed=1234800.0 + 2022-07-14 19:15:49,000 [DataBaseMonitor] - count=143051310 speed=1230900.0 + 2022-07-14 19:15:59,323 [DataBaseMonitor] - count=155276310 speed=1222500.0 + 2022-07-14 19:16:09,649 [DataBaseMonitor] - count=167603310 speed=1232700.0 + 2022-07-14 19:16:19,995 [DataBaseMonitor] - count=179976310 speed=1237300.0 + ``` + +
    + +:::note +使用 Python 连接器多进程连接 TDengine 的时候,有一个限制:不能在父进程中建立连接,所有连接只能在子进程中创建。 +如果在父进程中创建连接,子进程再创建连接就会一直阻塞。这是个已知问题。 + +::: + +
    +
    + + diff --git a/docs/zh/07-develop/03-insert-data/highvolume.webp b/docs/zh/07-develop/03-insert-data/highvolume.webp new file mode 100644 index 0000000000000000000000000000000000000000..46dfc74ae3b0043c591ff930c62251da49cae7ad Binary files /dev/null and b/docs/zh/07-develop/03-insert-data/highvolume.webp differ diff --git a/docs/zh/07-develop/03-insert-data/index.md b/docs/zh/07-develop/03-insert-data/index.md index 55a28e4a8ba13501e2f481c9aba67b7300da98d0..f1e5ada4dfd350e982fa0ae57412af07ac43e03a 100644 --- a/docs/zh/07-develop/03-insert-data/index.md +++ b/docs/zh/07-develop/03-insert-data/index.md @@ -1,5 +1,7 @@ --- +sidebar_label: 写入数据 title: 写入数据 +description: TDengine 的各种写入方式 --- TDengine 支持多种写入协议,包括 SQL,InfluxDB Line 协议, OpenTSDB Telnet 协议,OpenTSDB JSON 格式协议。数据可以单条插入,也可以批量插入,可以插入一个数据采集点的数据,也可以同时插入多个数据采集点的数据。同时,TDengine 支持多线程插入,支持时间乱序数据插入,也支持历史数据插入。InfluxDB Line 协议、OpenTSDB Telnet 协议和 OpenTSDB JSON 格式协议是 TDengine 支持的三种无模式写入协议。使用无模式方式写入无需提前创建超级表和子表,并且引擎能自适用数据对表结构做调整。 diff --git a/docs/zh/07-develop/04-query-data/index.mdx b/docs/zh/07-develop/04-query-data/index.mdx index 68f49d9f2b36fce83dc76e43e36f1049ae3de18d..d6156c8a59a70af80f2632cdf3801ef7281b69d5 100644 --- a/docs/zh/07-develop/04-query-data/index.mdx +++ b/docs/zh/07-develop/04-query-data/index.mdx @@ -1,4 +1,5 @@ --- +sidebar_label: 查询数据 title: 查询数据 description: "主要查询功能,通过连接器执行同步查询和异步查询" --- @@ -43,7 +44,7 @@ Query OK, 2 row(s) in set (0.001100s) 为满足物联网场景的需求,TDengine 支持几个特殊的函数,比如 twa(时间加权平均),spread (最大值与最小值的差),last_row(最后一条记录)等,更多与物联网场景相关的函数将添加进来。 -具体的查询语法请看 [TAOS SQL 的数据查询](../../taos-sql/select) 章节。 +具体的查询语法请看 [TDengine SQL 的数据查询](../../taos-sql/select) 章节。 ## 多表聚合查询 @@ -51,77 +52,78 @@ Query OK, 2 row(s) in set (0.001100s) ### 示例一 -在 TAOS Shell,查找加利福尼亚州所有智能电表采集的电压平均值,并按照 location 分组。 +在 TDengine CLI,查找加利福尼亚州所有智能电表采集的电压平均值,并按照 location 分组。 ``` -taos> SELECT AVG(voltage) FROM meters GROUP BY location; - avg(voltage) | location | -============================================================= - 222.000000000 | California.LosAngeles | - 219.200000000 | California.SanFrancisco | -Query OK, 2 row(s) in set (0.002136s) +taos> SELECT AVG(voltage), location FROM meters GROUP BY location; + avg(voltage) | location | +=============================================================================================== + 219.200000000 | California.SanFrancisco | + 221.666666667 | California.LosAngeles | +Query OK, 2 rows in database (0.005995s) ``` ### 示例二 -在 TAOS shell, 查找 groupId 为 2 的所有智能电表过去 24 小时的记录条数,电流的最大值。 +在 TDengine CLI, 查找 groupId 为 2 的所有智能电表的记录条数,电流的最大值。 ``` -taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h; +taos> SELECT count(*), max(current) FROM meters where groupId = 2; cunt(*) | max(current) | ================================== 5 | 13.4 | Query OK, 1 row(s) in set (0.002136s) ``` -在 [TAOS SQL 的数据查询](../../taos-sql/select) 一章,查询类操作都会注明是否支持超级表。 +在 [TDengine SQL 的数据查询](../../taos-sql/select) 一章,查询类操作都会注明是否支持超级表。 ## 降采样查询、插值 物联网场景里,经常需要通过降采样(down sampling)将采集的数据按时间段进行聚合。TDengine 提供了一个简便的关键词 interval 让按照时间窗口的查询操作变得极为简单。比如,将智能电表 d1001 采集的电流值每 10 秒钟求和 ``` -taos> SELECT sum(current) FROM d1001 INTERVAL(10s); - ts | sum(current) | +taos> SELECT _wstart, sum(current) FROM d1001 INTERVAL(10s); + _wstart | sum(current) | ====================================================== 2018-10-03 14:38:00.000 | 10.300000191 | 2018-10-03 14:38:10.000 | 24.900000572 | -Query OK, 2 row(s) in set (0.000883s) +Query OK, 2 rows in database (0.003139s) ``` 降采样操作也适用于超级表,比如:将加利福尼亚州所有智能电表采集的电流值每秒钟求和 ``` -taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s); - ts | sum(current) | +taos> SELECT _wstart, SUM(current) FROM meters where location like "California%" INTERVAL(1s); + _wstart | sum(current) | ====================================================== 2018-10-03 14:38:04.000 | 10.199999809 | - 2018-10-03 14:38:05.000 | 32.900000572 | + 2018-10-03 14:38:05.000 | 23.699999809 | 2018-10-03 14:38:06.000 | 11.500000000 | 2018-10-03 14:38:15.000 | 12.600000381 | - 2018-10-03 14:38:16.000 | 36.000000000 | -Query OK, 5 row(s) in set (0.001538s) + 2018-10-03 14:38:16.000 | 34.400000572 | +Query OK, 5 rows in database (0.007413s) ``` 降采样操作也支持时间偏移,比如:将所有智能电表采集的电流值每秒钟求和,但要求每个时间窗口从 500 毫秒开始 ``` -taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a); - ts | sum(current) | +taos> SELECT _wstart, SUM(current) FROM meters INTERVAL(1s, 500a); + _wstart | sum(current) | ====================================================== - 2018-10-03 14:38:04.500 | 11.189999809 | - 2018-10-03 14:38:05.500 | 31.900000572 | - 2018-10-03 14:38:06.500 | 11.600000000 | - 2018-10-03 14:38:15.500 | 12.300000381 | - 2018-10-03 14:38:16.500 | 35.000000000 | -Query OK, 5 row(s) in set (0.001521s) + 2018-10-03 14:38:03.500 | 10.199999809 | + 2018-10-03 14:38:04.500 | 10.300000191 | + 2018-10-03 14:38:05.500 | 13.399999619 | + 2018-10-03 14:38:06.500 | 11.500000000 | + 2018-10-03 14:38:14.500 | 12.600000381 | + 2018-10-03 14:38:16.500 | 34.400000572 | +Query OK, 6 rows in database (0.005515s) ``` 物联网场景里,每个数据采集点采集数据的时间是难同步的,但很多分析算法(比如 FFT)需要把采集的数据严格按照时间等间隔的对齐,在很多系统里,需要应用自己写程序来处理,但使用 TDengine 的降采样操作就轻松解决。 如果一个时间间隔里,没有采集的数据,TDengine 还提供插值计算的功能。 -语法规则细节请见 [TAOS SQL 的按时间窗口切分聚合](../../taos-sql/distinguished) 章节。 +语法规则细节请见 [TDengine SQL 的按时间窗口切分聚合](../../taos-sql/distinguished) 章节。 ## 示例代码 diff --git a/docs/zh/07-develop/06-stream.md b/docs/zh/07-develop/06-stream.md index ab4fdf900422f1e76787c49fba68dc6a09f6a473..d5296582d500e3271130bc1bfc6de34492133a8a 100644 --- a/docs/zh/07-develop/06-stream.md +++ b/docs/zh/07-develop/06-stream.md @@ -4,8 +4,16 @@ description: "TDengine 流式计算将数据的写入、预处理、复杂分析 title: 流式计算 --- -在时序数据的处理中,经常要对原始数据进行清洗、预处理,再使用时序数据库进行长久的储存。用户通常需要在时序数据库之外再搭建 Kafka、Flink、Spark 等流计算处理引擎,增加了用户的开发成本和维护成本。 -使用 TDengine 3.0 的流式计算引擎能够最大限度的减少对这些额外中间件的依赖,真正将数据的写入、预处理、长期存储、复杂分析、实时计算、实时报警触发等功能融为一体,并且,所有这些任务只需要使用 SQL 完成,极大降低了用户的学习成本、使用成本。 +在时序数据的处理中,经常要对原始数据进行清洗、预处理,再使用时序数据库进行长久的储存。在传统的时序数据解决方案中,常常需要部署 Kafka、Flink 等流处理系统。而流处理系统的复杂性,带来了高昂的开发与运维成本。 + +TDengine 3.0 的流式计算引擎提供了实时处理写入的数据流的能力,使用 SQL 定义实时流变换,当数据被写入流的源表后,数据会被以定义的方式自动处理,并根据定义的触发模式向目的表推送结果。它提供了替代复杂流处理系统的轻量级解决方案,并能够在高吞吐的数据写入的情况下,提供毫秒级的计算结果延迟。 + +流式计算可以包含数据过滤,标量函数计算(含UDF),以及窗口聚合(支持滑动窗口、会话窗口与状态窗口),可以以超级表、子表、普通表为源表,写入到目的超级表。在创建流时,目的超级表将被自动创建,随后新插入的数据会被流定义的方式处理并写入其中,通过 partition by 子句,可以以表名或标签划分 partition,不同的 partition 将写入到目的超级表的不同子表。 + +TDengine 的流式计算能够支持分布在多个 vnode 中的超级表聚合;还能够处理乱序数据的写入:它提供了 watermark 机制以度量容忍数据乱序的程度,并提供了 ignore expired 配置项以决定乱序数据的处理策略——丢弃或者重新计算。 + +详见 [流式计算](../../taos-sql/stream) + ## 流式计算的创建 @@ -14,7 +22,7 @@ CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name AS subq stream_options: { TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time] WATERMARK time - IGNORE EXPIRED + IGNORE EXPIRED [0 | 1] } ``` @@ -59,7 +67,7 @@ insert into d1004 values("2018-10-03 14:38:05.000", 10.80000, 223, 0.29000); insert into d1004 values("2018-10-03 14:38:06.500", 11.50000, 221, 0.35000); ``` -### 查询以观查结果 +### 查询以观察结果 ```sql taos> select start, end, max_current from current_stream_output_stb; @@ -88,7 +96,7 @@ create stream power_stream into power_stream_output_stb as select ts, concat_ws( 参考示例一 [写入数据](#写入数据) -### 查询以观查结果 +### 查询以观察结果 ```sql taos> select ts, meter_location, active_power, reactive_power from power_stream_output_stb; ts | meter_location | active_power | reactive_power | @@ -102,4 +110,4 @@ taos> select ts, meter_location, active_power, reactive_power from power_stream_ 2018-10-03 14:38:16.800 | California.SanFrancisco.d1001 | 2588.728381186 | 829.240910475 | 2018-10-03 14:38:16.650 | California.SanFrancisco.d1002 | 2175.595991997 | 555.520860397 | Query OK, 8 rows in database (0.014753s) -``` \ No newline at end of file +``` diff --git a/docs/zh/07-develop/07-tmq.md b/docs/zh/07-develop/07-tmq.md deleted file mode 100644 index 25d468cad3658190f6b9409637543061ac22f958..0000000000000000000000000000000000000000 --- a/docs/zh/07-develop/07-tmq.md +++ /dev/null @@ -1,249 +0,0 @@ ---- -sidebar_label: 数据订阅 -description: "数据订阅与推送服务。写入到 TDengine 中的时序数据能够被自动推送到订阅客户端。" -title: 数据订阅 ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; -import Java from "./_sub_java.mdx"; -import Python from "./_sub_python.mdx"; -import Go from "./_sub_go.mdx"; -import Rust from "./_sub_rust.mdx"; -import Node from "./_sub_node.mdx"; -import CSharp from "./_sub_cs.mdx"; -import CDemo from "./_sub_c.mdx"; - - -为了帮助应用实时获取写入 TDengine 的数据,或者以事件到达顺序处理数据,TDengine提供了类似消息队列产品的数据订阅、消费接口。这样在很多场景下,采用 TDengine 的时序数据处理系统不再需要集成消息队列产品,比如 kafka, 从而简化系统设计的复杂度,降低运营维护成本。 - -与 kafka 一样,你需要定义 topic, 但 TDengine 的 topic 是基于一个已经存在的超级表、子表或普通表的查询条件,即一个 SELECT 语句。你可以使用 SQL 对标签、表名、列、表达式等条件进行过滤,以及对数据进行标量函数与 UDF 计算(不包括数据聚合)。与其他消息队列软件相比,这是 TDengine 数据订阅功能的最大的优势,它提供了更大的灵活性,数据的颗粒度可以由应用随时调整,而且数据的过滤与预处理交给 TDengine,而不是应用完成,有效的减少传输的数据量与应用的复杂度。 - -消费者订阅 topic 后,可以实时获得最新的数据。多个消费者可以组成一个消费者组 (consumer group), 一个消费者组里的多个消费者共享消费进度,便于多线程、分布式地消费数据,提高消费速度。但不同消费者组中的消费者即使消费同一个topic, 并不共享消费进度。一个消费者可以订阅多个 topic。如果订阅的是超级表,数据可能会分布在多个不同的 vnode 上,也就是多个 shard 上,这样一个消费组里有多个消费者可以提高消费效率。TDengine 的消息队列提供了消息的ACK机制,在宕机、重启等复杂环境下确保 at least once 消费。 - -为了实现上述功能,TDengine 会为 WAL (Write-Ahead-Log) 文件自动创建索引以支持快速随机访问,并提供了灵活可配置的文件切换与保留机制:用户可以按需指定 WAL 文件保留的时间以及大小(详见 create database 语句)。通过以上方式将 WAL 改造成了一个保留事件到达顺序的、可持久化的存储引擎(但由于 TSDB 具有远比 WAL 更高的压缩率,我们不推荐保留太长时间,一般来说,不超过几天)。 对于以 topic 形式创建的查询,TDengine 将对接 WAL 而不是 TSDB 作为其存储引擎。在消费时,TDengine 根据当前消费进度从 WAL 直接读取数据,并使用统一的查询引擎实现过滤、变换等操作,将数据推送给消费者。 - -本文档不对消息队列本身的基础知识做介绍,如果需要了解,请自行搜索。 - -## 主要数据结构和API - -TMQ 的 API 中,与订阅相关的主要数据结构和API如下: - -```c -typedef struct tmq_t tmq_t; -typedef struct tmq_conf_t tmq_conf_t; -typedef struct tmq_list_t tmq_list_t; - -typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); - -DLL_EXPORT tmq_list_t *tmq_list_new(); -DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); -DLL_EXPORT void tmq_list_destroy(tmq_list_t *); -DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); -DLL_EXPORT const char *tmq_err2str(int32_t code); - -DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); -DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); -DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); -DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); -DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); -DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); - -enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -}; -typedef enum tmq_conf_res_t tmq_conf_res_t; - -DLL_EXPORT tmq_conf_t *tmq_conf_new(); -DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); -DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); -DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); -``` - -这些 API 的文档请见 [C/C++ Connector](/reference/connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面C语言的示例代码。 - -## 写入数据 - -首先完成建库、建一张超级表和多张子表操作,然后就可以写入数据了,比如: - -```sql -drop database if exists tmqdb; -create database tmqdb; -create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16) tags(t1 int, t3 varchar(16)); -create table tmqdb.ctb0 using tmqdb.stb tags(0, "subtable0"); -create table tmqdb.ctb1 using tmqdb.stb tags(1, "subtable1"); -insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00'); -insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11'); -``` - -## 创建topic: - -```sql -create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1; -``` - -TMQ支持多种订阅类型: - -### 列订阅 - -语法:CREATE TOPIC topic_name as subquery -通过select语句订阅(包括select *,或select ts, c1等指定列描述订阅,可以带条件过滤、标量函数计算,但不支持聚合函数、不支持时间窗口聚合) - -- TOPIC一旦创建则schema确定 -- 被订阅或用于计算的column和tag不可被删除、修改 -- 若发生schema变更,新增的column不出现在结果中 - -### 超级表订阅 -语法:CREATE TOPIC topic_name AS STABLE stbName - -与select * from stbName订阅的区别是: -- 不会限制用户的schema变更 -- 返回的是非结构化的数据:返回数据的schema会随之超级表的schema变化而变化 -- 用户对于要处理的每一个数据块都可能有不同的schema,因此,必须重新获取schema -- 返回数据不带有tag - -## 创建 consumer 以及consumer group - -对于consumer, 目前支持的config包括: - -| 参数名称 | 参数值 | 备注 | -| ---------------------------- | ------------------------------ | ------------------------------------------------------ | -| group.id | 最大长度:192 | | -| enable.auto.commit | 合法值:true, false | | -| auto.commit.interval.ms | | | -| auto.offset.reset | 合法值:earliest, latest, none | | -| td.connect.ip | 用于连接,同taos_connect的参数 | | -| td.connect.user | 用于连接,同taos_connect的参数 | | -| td.connect.pass | 用于连接,同taos_connect的参数 | | -| td.connect.port | 用于连接,同taos_connect的参数 | | -| enable.heartbeat.background | 合法值:true, false | 开启后台心跳,即consumer不会因为长时间不poll而认为离线 | -| experimental.snapshot.enable | 合法值:true, false | 从wal开始消费,还是从tsbs开始消费 | -| msg.with.table.name | 合法值:true, false | 从消息中能否解析表名 | - -```sql -/* 根据需要,设置消费组(group.id)、自动提交(enable.auto.commit)、自动提交时间间隔(auto.commit.interval.ms)、用户名(td.connect.user)、密码(td.connect.pass)等参数 */ - tmq_conf_t* conf = tmq_conf_new(); - tmq_conf_set(conf, "enable.auto.commit", "true"); - tmq_conf_set(conf, "auto.commit.interval.ms", "1000"); - tmq_conf_set(conf, "group.id", "cgrpName"); - tmq_conf_set(conf, "td.connect.user", "root"); - tmq_conf_set(conf, "td.connect.pass", "taosdata"); - tmq_conf_set(conf, "auto.offset.reset", "earliest"); - tmq_conf_set(conf, "experimental.snapshot.enable", "true"); - tmq_conf_set(conf, "msg.with.table.name", "true"); - tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); - - tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); - tmq_conf_destroy(conf); -``` - -上述配置中包括consumer group ID,如果多个 consumer 指定的 consumer group ID一样,则自动形成一个consumer group,共享消费进度。 - - -## 创建 topic 列表 - -单个consumer支持同时订阅多个topic。 - -```sql - tmq_list_t* topicList = tmq_list_new(); - tmq_list_append(topicList, "topicName"); -``` - -## 启动订阅并开始消费 - -``` - /* 启动订阅 */ - tmq_subscribe(tmq, topicList); - tmq_list_destroy(topicList); - - /* 循环poll消息 */ - while (running) { - TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeOut); - msg_process(tmqmsg); - } -``` - -这里是一个 **while** 循环,每调用一次tmq_consumer_poll(),获取一个消息,该消息与普通查询返回的结果集完全相同,可以使用相同的解析API完成消息内容的解析。 - -## 结束消费 - -```sql - /* 取消订阅 */ - tmq_unsubscribe(tmq); - - /* 关闭消费 */ - tmq_consumer_close(tmq); -``` - -## 删除topic - -如果不再需要,可以删除创建topic,但注意:只有没有被订阅的topic才能别删除。 - -```sql - /* 删除topic */ - drop topic topicName; -``` - -## 状态查看 - -1、topics:查询已经创建的topic - -```sql - show topics; -``` - -2、consumers:查询consumer的状态及其订阅的topic - -```sql - show consumers; -``` - -3、subscriptions:查询consumer与vgroup之间的分配关系 - -```sql - show subscriptions; -``` - -## 示例代码 - -本节展示各种语言的示例代码。 - - - - -```c -{{#include examples/c/tmq.c}} -``` - - - - - - - - - - - - - - - - -```python -{{#include docs/examples/python/tmq_example.py}} -``` - - - - - - - - - - - - diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx new file mode 100644 index 0000000000000000000000000000000000000000..2f5c13d9b0bc0e3940fb99b45c693e2ae80c8f47 --- /dev/null +++ b/docs/zh/07-develop/07-tmq.mdx @@ -0,0 +1,839 @@ +--- +sidebar_label: 数据订阅 +description: "数据订阅与推送服务。写入到 TDengine 中的时序数据能够被自动推送到订阅客户端。" +title: 数据订阅 +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; +import Java from "./_sub_java.mdx"; +import Python from "./_sub_python.mdx"; +import Go from "./_sub_go.mdx"; +import Rust from "./_sub_rust.mdx"; +import Node from "./_sub_node.mdx"; +import CSharp from "./_sub_cs.mdx"; +import CDemo from "./_sub_c.mdx"; + +为了帮助应用实时获取写入 TDengine 的数据,或者以事件到达顺序处理数据,TDengine 提供了类似消息队列产品的数据订阅、消费接口。这样在很多场景下,采用 TDengine 的时序数据处理系统不再需要集成消息队列产品,比如 kafka, 从而简化系统设计的复杂度,降低运营维护成本。 + +与 kafka 一样,你需要定义 *topic*, 但 TDengine 的 *topic* 是基于一个已经存在的超级表、子表或普通表的查询条件,即一个 `SELECT` 语句。你可以使用 SQL 对标签、表名、列、表达式等条件进行过滤,以及对数据进行标量函数与 UDF 计算(不包括数据聚合)。与其他消息队列软件相比,这是 TDengine 数据订阅功能的最大的优势,它提供了更大的灵活性,数据的颗粒度可以由应用随时调整,而且数据的过滤与预处理交给 TDengine,而不是应用完成,有效的减少传输的数据量与应用的复杂度。 + +消费者订阅 *topic* 后,可以实时获得最新的数据。多个消费者可以组成一个消费者组 (consumer group), 一个消费者组里的多个消费者共享消费进度,便于多线程、分布式地消费数据,提高消费速度。但不同消费者组中的消费者即使消费同一个 topic, 并不共享消费进度。一个消费者可以订阅多个 topic。如果订阅的是超级表,数据可能会分布在多个不同的 vnode 上,也就是多个 shard 上,这样一个消费组里有多个消费者可以提高消费效率。TDengine 的消息队列提供了消息的 ACK 机制,在宕机、重启等复杂环境下确保 at least once 消费。 + +为了实现上述功能,TDengine 会为 WAL (Write-Ahead-Log) 文件自动创建索引以支持快速随机访问,并提供了灵活可配置的文件切换与保留机制:用户可以按需指定 WAL 文件保留的时间以及大小(详见 create database 语句)。通过以上方式将 WAL 改造成了一个保留事件到达顺序的、可持久化的存储引擎(但由于 TSDB 具有远比 WAL 更高的压缩率,我们不推荐保留太长时间,一般来说,不超过几天)。 对于以 topic 形式创建的查询,TDengine 将对接 WAL 而不是 TSDB 作为其存储引擎。在消费时,TDengine 根据当前消费进度从 WAL 直接读取数据,并使用统一的查询引擎实现过滤、变换等操作,将数据推送给消费者。 + +本文档不对消息队列本身的基础知识做介绍,如果需要了解,请自行搜索。 + +## 主要数据结构和 API + +不同语言下, TMQ 订阅相关的 API 及数据结构如下: + + + + +```c +typedef struct tmq_t tmq_t; +typedef struct tmq_conf_t tmq_conf_t; +typedef struct tmq_list_t tmq_list_t; + +typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); + +DLL_EXPORT tmq_list_t *tmq_list_new(); +DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); +DLL_EXPORT void tmq_list_destroy(tmq_list_t *); +DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); +DLL_EXPORT const char *tmq_err2str(int32_t code); + +DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); +DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); +DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); +DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); +DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); +DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + +enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, +}; +typedef enum tmq_conf_res_t tmq_conf_res_t; + +DLL_EXPORT tmq_conf_t *tmq_conf_new(); +DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); +DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); +DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); +``` + +这些 API 的文档请见 [C/C++ Connector](../../connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。 + + + + +```java +void subscribe(Collection topics) throws SQLException; + +void unsubscribe() throws SQLException; + +Set subscription() throws SQLException; + +ConsumerRecords poll(Duration timeout) throws SQLException; + +void commitAsync(); + +void commitAsync(OffsetCommitCallback callback); + +void commitSync() throws SQLException; + +void close() throws SQLException; +``` + + + + + +```python +class TaosConsumer(): + def __init__(self, *topics, **configs) + + def __iter__(self) + + def __next__(self) + + def sync_next(self) + + def subscription(self) + + def unsubscribe(self) + + def close(self) + + def __del__(self) +``` + + + + + +```go +func NewConsumer(conf *Config) (*Consumer, error) + +func (c *Consumer) Close() error + +func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error + +func (c *Consumer) FreeMessage(message unsafe.Pointer) + +func (c *Consumer) Poll(timeout time.Duration) (*Result, error) + +func (c *Consumer) Subscribe(topics []string) error + +func (c *Consumer) Unsubscribe() error +``` + + + + + +```rust +impl TBuilder for TmqBuilder + fn from_dsn(dsn: D) -> Result + fn build(&self) -> Result + +impl AsAsyncConsumer for Consumer + async fn subscribe, I: IntoIterator + Send>( + &mut self, + topics: I, + ) -> Result<(), Self::Error>; + fn stream( + &self, + ) -> Pin< + Box< + dyn '_ + + Send + + futures::Stream< + Item = Result<(Self::Offset, MessageSet), Self::Error>, + >, + >, + >; + async fn commit(&self, offset: Self::Offset) -> Result<(), Self::Error>; + + async fn unsubscribe(self); +``` + +可在 上查看详细 API 说明。 + + + + + +```js +function TMQConsumer(config) + +function subscribe(topic) + +function consume(timeout) + +function subscription() + +function unsubscribe() + +function commit(msg) + +function close() +``` + + + + + +```csharp +ConsumerBuilder(IEnumerable> config) + +virtual IConsumer Build() + +Consumer(ConsumerBuilder builder) + +void Subscribe(IEnumerable topics) + +void Subscribe(string topic) + +ConsumeResult Consume(int millisecondsTimeout) + +List Subscription() + +void Unsubscribe() + +void Commit(ConsumeResult consumerResult) + +void Close() +``` + + + + +## 写入数据 + +首先完成建库、建一张超级表和多张子表操作,然后就可以写入数据了,比如: + +```sql +DROP DATABASE IF EXISTS tmqdb; +CREATE DATABASE tmqdb; +CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16) TAGS(t1 INT, t3 VARCHAR(16)); +CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0"); +CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1"); +INSERT INTO tmqdb.ctb0 VALUES(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00'); +INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11'); +``` + +## 创建 *topic* + +TDengine 使用 SQL 创建一个 topic: + +```sql +CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1; +``` + +TMQ 支持多种订阅类型: + +### 列订阅 + +语法: + +```sql +CREATE TOPIC topic_name as subquery +``` + +通过 `SELECT` 语句订阅(包括 `SELECT *`,或 `SELECT ts, c1` 等指定列订阅,可以带条件过滤、标量函数计算,但不支持聚合函数、不支持时间窗口聚合)。需要注意的是: + +- 该类型 TOPIC 一旦创建则订阅数据的结构确定。 +- 被订阅或用于计算的列或标签不可被删除(`ALTER table DROP`)、修改(`ALTER table MODIFY`)。 +- 若发生表结构变更,新增的列不出现在结果中,若发生列删除则会报错。 + +### 超级表订阅 + +语法: + +```sql +CREATE TOPIC topic_name AS STABLE stb_name +``` + +与 `SELECT * from stbName` 订阅的区别是: + +- 不会限制用户的表结构变更。 +- 返回的是非结构化的数据:返回数据的结构会随之超级表的表结构变化而变化。 +- 用户对于要处理的每一个数据块都可能有不同的表结构。 +- 返回数据不包含标签。 + +### 数据库订阅 + +语法: + +```sql +CREATE TOPIC topic_name [WITH META] AS DATABASE db_name; +``` + +通过该语句可创建一个包含数据库所有表数据的订阅,`WITH META` 可选择将数据库结构变动信息加入到订阅消息流,TMQ 将消费当前数据库下所有表结构的变动,包括超级表的创建与删除,列添加、删除或修改,子表的创建、删除及 TAG 变动信息等等。消费者可通过 API 来判断具体的消息类型。这一点也是与 Kafka 不同的地方。 + +## 创建消费者 *consumer* + +消费者需要通过一系列配置选项创建,基础配置项如下表所示: + +| 参数名称 | 类型 | 参数说明 | 备注 | +| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- | +| `td.connect.ip` | string | 用于创建连接,同 `taos_connect` | | +| `td.connect.user` | string | 用于创建连接,同 `taos_connect` | | +| `td.connect.pass` | string | 用于创建连接,同 `taos_connect` | +| `td.connect.port` | integer | 用于创建连接,同 `taos_connect` | +| `group.id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192。 | +| `client.id` | string | 客户端 ID | 最大长度:192。 | +| `auto.offset.reset` | enum | 消费组订阅的初始位置 | 可选:`earliest`, `latest`, `none`(default) | +| `enable.auto.commit` | boolean | 启用自动提交 | 合法值:`true`, `false`。 | +| `auto.commit.interval.ms` | integer | 以毫秒为单位的自动提交时间间隔 | +| `enable.heartbeat.background` | boolean | 启用后台心跳,启用后即使长时间不 poll 消息也不会造成离线 | | +| `experimental.snapshot.enable` | boolean | 从 WAL 开始消费,还是从 TSBS 开始消费 | | +| `msg.with.table.name` | boolean | 是否允许从消息中解析表名 | + +对于不同编程语言,其设置方式如下: + + + + +```c +/* 根据需要,设置消费组 (group.id)、自动提交 (enable.auto.commit)、 + 自动提交时间间隔 (auto.commit.interval.ms)、用户名 (td.connect.user)、密码 (td.connect.pass) 等参数 */ +tmq_conf_t* conf = tmq_conf_new(); +tmq_conf_set(conf, "enable.auto.commit", "true"); +tmq_conf_set(conf, "auto.commit.interval.ms", "1000"); +tmq_conf_set(conf, "group.id", "cgrpName"); +tmq_conf_set(conf, "td.connect.user", "root"); +tmq_conf_set(conf, "td.connect.pass", "taosdata"); +tmq_conf_set(conf, "auto.offset.reset", "earliest"); +tmq_conf_set(conf, "experimental.snapshot.enable", "true"); +tmq_conf_set(conf, "msg.with.table.name", "true"); +tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); + +tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); +tmq_conf_destroy(conf); +``` + + + + +对于 Java 程序,使用如下配置项: + +| 参数名称 | 类型 | 参数说明 | +| ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- | +| `bootstrap.servers` | string | 连接地址,如 `localhost:6030` | +| `value.deserializer` | string | 值解析方法,使用此方法应实现 `com.taosdata.jdbc.tmq.Deserializer` 接口或继承 `com.taosdata.jdbc.tmq.ReferenceDeserializer` 类 | +| `value.deserializer.encoding` | string | 指定字符串解析的字符集 | | + +需要注意:此处使用 `bootstrap.servers` 替代 `td.connect.ip` 和 `td.connect.port`,以提供与 Kafka 一致的接口。 + +```java +Properties properties = new Properties(); +properties.setProperty("enable.auto.commit", "true"); +properties.setProperty("auto.commit.interval.ms", "1000"); +properties.setProperty("group.id", "cgrpName"); +properties.setProperty("bootstrap.servers", "127.0.0.1:6030"); +properties.setProperty("td.connect.user", "root"); +properties.setProperty("td.connect.pass", "taosdata"); +properties.setProperty("auto.offset.reset", "earliest"); +properties.setProperty("msg.with.table.name", "true"); +properties.setProperty("value.deserializer", "com.taos.example.MetersDeserializer"); + +TaosConsumer consumer = new TaosConsumer<>(properties); + +/* value deserializer definition. */ +import com.taosdata.jdbc.tmq.ReferenceDeserializer; + +public class MetersDeserializer extends ReferenceDeserializer { +} +``` + + + + + +```go +config := tmq.NewConfig() +defer config.Destroy() +err = config.SetGroupID("test") +if err != nil { + panic(err) +} +err = config.SetAutoOffsetReset("earliest") +if err != nil { + panic(err) +} +err = config.SetConnectIP("127.0.0.1") +if err != nil { + panic(err) +} +err = config.SetConnectUser("root") +if err != nil { + panic(err) +} +err = config.SetConnectPass("taosdata") +if err != nil { + panic(err) +} +err = config.SetConnectPort("6030") +if err != nil { + panic(err) +} +err = config.SetMsgWithTableName(true) +if err != nil { + panic(err) +} +err = config.EnableHeartBeat() +if err != nil { + panic(err) +} +err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) { + if result.ErrCode != 0 { + errStr := wrapper.TMQErr2Str(result.ErrCode) + err := errors.NewError(int(result.ErrCode), errStr) + panic(err) + } +}) +if err != nil { + panic(err) +} +``` + + + + + +```rust +let mut dsn: Dsn = "taos://".parse()?; +dsn.set("group.id", "group1"); +dsn.set("client.id", "test"); +dsn.set("auto.offset.reset", "earliest"); + +let tmq = TmqBuilder::from_dsn(dsn)?; + +let mut consumer = tmq.build()?; +``` + + + + + +Python 使用以下配置项创建一个 Consumer 实例。 + +| 参数名称 | 类型 | 参数说明 | 备注 | +| :----------------------------: | :----: | -------------------------------------------------------- | ------------------------------------------- | +| `td_connect_ip` | string | 用于创建连接,同 `taos_connect` | | +| `td_connect_user` | string | 用于创建连接,同 `taos_connect` | | +| `td_connect_pass` | string | 用于创建连接,同 `taos_connect` | | +| `td_connect_port` | string | 用于创建连接,同 `taos_connect` | | +| `group_id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192。 | +| `client_id` | string | 客户端 ID | 最大长度:192。 | +| `auto_offset_reset` | string | 消费组订阅的初始位置 | 可选:`earliest`, `latest`, `none`(default) | +| `enable_auto_commit` | string | 启用自动提交 | 合法值:`true`, `false`。 | +| `auto_commit_interval_ms` | string | 以毫秒为单位的自动提交时间间隔 | | +| `enable_heartbeat_background` | string | 启用后台心跳,启用后即使长时间不 poll 消息也不会造成离线 | 合法值:`true`, `false` | +| `experimental_snapshot_enable` | string | 从 WAL 开始消费,还是从 TSBS 开始消费 | 合法值:`true`, `false` | +| `msg_with_table_name` | string | 是否允许从消息中解析表名 | 合法值:`true`, `false` | +| `timeout` | int | 消费者拉去的超时时间 | | + + + + + +```js +// 根据需要,设置消费组 (group.id)、自动提交 (enable.auto.commit)、 +// 自动提交时间间隔 (auto.commit.interval.ms)、用户名 (td.connect.user)、密码 (td.connect.pass) 等参数 + +let consumer = taos.consumer({ + 'enable.auto.commit': 'true', + 'auto.commit.interval.ms','1000', + 'group.id': 'tg2', + 'td.connect.user': 'root', + 'td.connect.pass': 'taosdata', + 'auto.offset.reset','earliest', + 'msg.with.table.name': 'true', + 'td.connect.ip','127.0.0.1', + 'td.connect.port','6030' + }); +``` + + + + + +```csharp +using TDengineTMQ; + +// 根据需要,设置消费组 (GourpId)、自动提交 (EnableAutoCommit)、 +// 自动提交时间间隔 (AutoCommitIntervalMs)、用户名 (TDConnectUser)、密码 (TDConnectPasswd) 等参数 +var cfg = new ConsumerConfig + { + EnableAutoCommit = "true" + AutoCommitIntervalMs = "1000" + GourpId = "TDengine-TMQ-C#", + TDConnectUser = "root", + TDConnectPasswd = "taosdata", + AutoOffsetReset = "earliest" + MsgWithTableName = "true", + TDConnectIp = "127.0.0.1", + TDConnectPort = "6030" + }; + +var consumer = new ConsumerBuilder(cfg).Build(); + +``` + + + + + +上述配置中包括 consumer group ID,如果多个 consumer 指定的 consumer group ID 一样,则自动形成一个 consumer group,共享消费进度。 + +## 订阅 *topics* + +一个 consumer 支持同时订阅多个 topic。 + + + + +```c +// 创建订阅 topics 列表 +tmq_list_t* topicList = tmq_list_new(); +tmq_list_append(topicList, "topicName"); +// 启动订阅 +tmq_subscribe(tmq, topicList); +tmq_list_destroy(topicList); + +``` + + + + +```java +List topics = new ArrayList<>(); +topics.add("tmq_topic"); +consumer.subscribe(topics); +``` + + + + +```go +consumer, err := tmq.NewConsumer(config) +if err != nil { + panic(err) +} +err = consumer.Subscribe([]string{"example_tmq_topic"}) +if err != nil { + panic(err) +} +``` + + + + +```rust +consumer.subscribe(["tmq_meters"]).await?; +``` + + + + + +```python +consumer = TaosConsumer('topic_ctb_column', group_id='vg2') +``` + + + + + +```js +// 创建订阅 topics 列表 +let topics = ['topic_test'] + +// 启动订阅 +consumer.subscribe(topics); +``` + + + + + +```csharp +// 创建订阅 topics 列表 +List topics = new List(); +topics.add("tmq_topic"); +// 启动订阅 +consumer.Subscribe(topics); +``` + + + + + +## 消费 + +以下代码展示了不同语言下如何对 TMQ 消息进行消费。 + + + + +```c +// 消费数据 +while (running) { + TAOS_RES* msg = tmq_consumer_poll(tmq, timeOut); + msg_process(msg); +} +``` + +这里是一个 **while** 循环,每调用一次 tmq_consumer_poll(),获取一个消息,该消息与普通查询返回的结果集完全相同,可以使用相同的解析 API 完成消息内容的解析。 + + + + +```java +while(running){ + ConsumerRecords meters = consumer.poll(Duration.ofMillis(100)); + for (Meters meter : meters) { + processMsg(meter); + } +} +``` + + + + + +```go +for { + result, err := consumer.Poll(time.Second) + if err != nil { + panic(err) + } + fmt.Println(result) + consumer.Commit(context.Background(), result.Message) + consumer.FreeMessage(result.Message) +} +``` + + + + + +```rust +{ + let mut stream = consumer.stream(); + + while let Some((offset, message)) = stream.try_next().await? { + // get information from offset + + // the topic + let topic = offset.topic(); + // the vgroup id, like partition id in kafka. + let vgroup_id = offset.vgroup_id(); + println!("* in vgroup id {vgroup_id} of topic {topic}\n"); + + if let Some(data) = message.into_data() { + while let Some(block) = data.fetch_raw_block().await? { + // one block for one table, get table name if needed + let name = block.table_name(); + let records: Vec = block.deserialize().try_collect()?; + println!( + "** table: {}, got {} records: {:#?}\n", + name.unwrap(), + records.len(), + records + ); + } + } + consumer.commit(offset).await?; + } +} +``` + + + + +```python +for msg in consumer: + for row in msg: + print(row) +``` + + + + + +```js +while(true){ + msg = consumer.consume(200); + // process message(consumeResult) + console.log(msg.topicPartition); + console.log(msg.block); + console.log(msg.fields) +} +``` + + + + + +```csharp +// 消费数据 +while (true) +{ + var consumerRes = consumer.Consume(100); + // process ConsumeResult + ProcessMsg(consumerRes); + consumer.Commit(consumerRes); +} +``` + + + + + +## 结束消费 + +消费结束后,应当取消订阅。 + + + + +```c +/* 取消订阅 */ +tmq_unsubscribe(tmq); + +/* 关闭消费者对象 */ +tmq_consumer_close(tmq); +``` + + + + +```java +/* 取消订阅 */ +consumer.unsubscribe(); + +/* 关闭消费 */ +consumer.close(); +``` + + + + + +```go +consumer.Close() +``` + + + + + +```rust +consumer.unsubscribe().await; +``` + + + + + +```py +# 取消订阅 +consumer.unsubscribe() +# 关闭消费 +consumer.close() +``` + + + + +```js +consumer.unsubscribe(); +consumer.close(); +``` + + + + + +```csharp +// 取消订阅 +consumer.Unsubscribe(); + +// 关闭消费 +consumer.Close(); +``` + + + + + +## 删除 *topic* + +如果不再需要订阅数据,可以删除 topic,需要注意:只有当前未在订阅中的 TOPIC 才能被删除。 + +```sql +/* 删除 topic */ +DROP TOPIC topic_name; +``` + +## 状态查看 + +1、*topics*:查询已经创建的 topic + +```sql +SHOW TOPICS; +``` + +2、consumers:查询 consumer 的状态及其订阅的 topic + +```sql +SHOW CONSUMERS; +``` + +3、subscriptions:查询 consumer 与 vgroup 之间的分配关系 + +```sql +SHOW SUBSCRIPTIONS; +``` + +## 示例代码 + +以下是各语言的完整示例代码。 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/zh/07-develop/08-cache.md b/docs/zh/07-develop/08-cache.md index bd9da6062d3cc1a21be418079f0fee40520f4460..29e28e3dde0816d9e5a08f74abd2382854d336da 100644 --- a/docs/zh/07-develop/08-cache.md +++ b/docs/zh/07-develop/08-cache.md @@ -20,11 +20,11 @@ create database db0 vgroups 100 buffer 16MB ## 读缓存 -在创建数据库时可以选择是否缓存该数据库中每个子表的最新数据。由参数 cachelast 设置,分为三种情况: -- 0: 不缓存 -- 1: 缓存子表最近一行数据,这将显著改善 last_row 函数的性能 -- 2: 缓存子表每一列最近的非 NULL 值,这将显著改善无特殊影响(比如 WHERE, ORDER BY, GROUP BY, INTERVAL)时的 last 函数的性能 -- 3: 同时缓存行和列,即等同于上述 cachelast 值为 1 或 2 时的行为同时生效 +在创建数据库时可以选择是否缓存该数据库中每个子表的最新数据。由参数 cachemodel 设置,分为四种情况: +- none: 不缓存 +- last_row: 缓存子表最近一行数据,这将显著改善 last_row 函数的性能 +- last_value: 缓存子表每一列最近的非 NULL 值,这将显著改善无特殊影响(比如 WHERE, ORDER BY, GROUP BY, INTERVAL)时的 last 函数的性能 +- both: 同时缓存最近的行和列,即等同于上述 cachemodel 值为 last_row 和 last_value 的行为同时生效 ## 元数据缓存 diff --git a/docs/zh/07-develop/_sub_c.mdx b/docs/zh/07-develop/_sub_c.mdx index da492a0269f064d8cdf9dfb80969894131d94015..b0667268e9978533e84e68ea3fe5f285538df762 100644 --- a/docs/zh/07-develop/_sub_c.mdx +++ b/docs/zh/07-develop/_sub_c.mdx @@ -1,3 +1,3 @@ ```c -{{#include docs/examples/c/subscribe_demo.c}} -``` \ No newline at end of file +{{#include docs/examples/c/tmq_example.c}} +``` diff --git a/docs/zh/07-develop/_sub_java.mdx b/docs/zh/07-develop/_sub_java.mdx index 52df23f7dd0dbdc9810b1e53d66c4fcfd610759e..d14b5fd6095dd90f89dd2c2e828858585cfddff9 100644 --- a/docs/zh/07-develop/_sub_java.mdx +++ b/docs/zh/07-develop/_sub_java.mdx @@ -1,7 +1,11 @@ ```java {{#include docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java}} +{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}} +{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}} ``` -:::note -目前 Java 接口没有提供异步订阅模式,但用户程序可以通过创建 `TimerTask` 等方式达到同样的效果。 - -::: \ No newline at end of file +```java +{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}} +``` +```java +{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}} +``` \ No newline at end of file diff --git a/docs/zh/07-develop/_sub_python.mdx b/docs/zh/07-develop/_sub_python.mdx index 490b76fca6deb61e61dc59c2096b30742a7d25f7..1309da5b416799492a6b85aae4b775e227c0ad6e 100644 --- a/docs/zh/07-develop/_sub_python.mdx +++ b/docs/zh/07-develop/_sub_python.mdx @@ -1,3 +1,3 @@ ```py -{{#include docs/examples/python/subscribe_demo.py}} -``` \ No newline at end of file +{{#include docs/examples/python/tmq_example.py}} +``` diff --git a/docs/zh/07-develop/_sub_rust.mdx b/docs/zh/07-develop/_sub_rust.mdx index afb8d79daa3bbd72d72795cb4425f12277d710fc..0021666a7024a9b63d6b9c38bf8a57b6eded6d66 100644 --- a/docs/zh/07-develop/_sub_rust.mdx +++ b/docs/zh/07-develop/_sub_rust.mdx @@ -1,3 +1,3 @@ -```rs +```rust {{#include docs/examples/rust/nativeexample/examples/subscribe_demo.rs}} -``` \ No newline at end of file +``` diff --git a/docs/zh/07-develop/index.md b/docs/zh/07-develop/index.md index 4d0f3c3cea3da3d70051dd07f835c34b4f47c3cd..efaffaea71ce68ee0a8ddbf5634c4150adc94bfb 100644 --- a/docs/zh/07-develop/index.md +++ b/docs/zh/07-develop/index.md @@ -1,5 +1,7 @@ --- title: 开发指南 +sidebar_label: 开发指南 +description: 让开发者能够快速上手的指南 --- 开发一个应用,如果你准备采用TDengine作为时序数据处理的工具,那么有如下几个事情要做: @@ -12,7 +14,7 @@ title: 开发指南 7. 在很多场景下(如车辆管理),应用需要获取每个数据采集点的最新状态,那么建议你采用TDengine的cache功能,而不用单独部署Redis等缓存软件。 8. 如果你发现TDengine的函数无法满足你的要求,那么你可以使用用户自定义函数来解决问题。 -本部分内容就是按照上述的顺序组织的。为便于理解,TDengine为每个功能为每个支持的编程语言都提供了示例代码。如果你希望深入了解SQL的使用,需要查看[SQL手册](/taos-sql/)。如果想更深入地了解各连接器的使用,请阅读[连接器参考指南](/reference/connector/)。如果还希望想将TDengine与第三方系统集成起来,比如Grafana, 请参考[第三方工具](/third-party/)。 +本部分内容就是按照上述的顺序组织的。为便于理解,TDengine为每个功能为每个支持的编程语言都提供了示例代码。如果你希望深入了解SQL的使用,需要查看[SQL手册](/taos-sql/)。如果想更深入地了解各连接器的使用,请阅读[连接器参考指南](../connector/)。如果还希望想将TDengine与第三方系统集成起来,比如Grafana, 请参考[第三方工具](../third-party/)。 如果在开发过程中遇到任何问题,请点击每个页面下方的["反馈问题"](https://github.com/taosdata/TDengine/issues/new/choose), 在GitHub上直接递交issue。 diff --git a/docs/zh/14-reference/02-rest-api/02-rest-api.mdx b/docs/zh/08-connector/02-rest-api.mdx similarity index 60% rename from docs/zh/14-reference/02-rest-api/02-rest-api.mdx rename to docs/zh/08-connector/02-rest-api.mdx index ba43aa30fd3593b9bab4a1f76de1913087e419fc..e254244657b457e10bc2daab020b230c9a8bb2cc 100644 --- a/docs/zh/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs/zh/08-connector/02-rest-api.mdx @@ -1,8 +1,10 @@ --- title: REST API +sidebar_label: REST API +description: 详细介绍 TDengine 提供的 RESTful API. --- -为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 REST API。为最大程度降低学习成本,不同于其他数据库 REST API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。REST 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。 +为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 REST API。为最大程度降低学习成本,不同于其他数据库 REST API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。REST 连接器的使用参见 [视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。 :::note 与原生连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。支持在 RESTful URL 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 URL 中指定的这个 db_name。 @@ -10,7 +12,7 @@ title: REST API ## 安装 -RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安装任何 TDengine 的库,只要客户端的开发语言支持 HTTP 协议即可。 +RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安装任何 TDengine 的库,只要客户端的开发语言支持 HTTP 协议即可。TDengine 的 RESTful API 由 [taosAdapter](../../reference/taosadapter) 提供,在使用 RESTful API 之前需要确保 `taosAdapter` 正常运行。 ## 验证 @@ -20,8 +22,10 @@ RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安 下面示例是列出所有的数据库,请把 h1.taosdata.com 和 6041(缺省值)替换为实际运行的 TDengine 服务 FQDN 和端口号: -```html -curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.taosdata.com:6041/rest/sql +```bash +curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \ + -d "select name, ntables, status from information_schema.ins_databases;" \ + h1.taosdata.com:6041/rest/sql ``` 返回值结果如下表示验证通过: @@ -35,188 +39,27 @@ curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.t "VARCHAR", 64 ], - [ - "create_time", - "TIMESTAMP", - 8 - ], - [ - "vgroups", - "SMALLINT", - 2 - ], [ "ntables", "BIGINT", 8 ], - [ - "replica", - "TINYINT", - 1 - ], - [ - "strict", - "VARCHAR", - 4 - ], - [ - "duration", - "VARCHAR", - 10 - ], - [ - "keep", - "VARCHAR", - 32 - ], - [ - "buffer", - "INT", - 4 - ], - [ - "pagesize", - "INT", - 4 - ], - [ - "pages", - "INT", - 4 - ], - [ - "minrows", - "INT", - 4 - ], - [ - "maxrows", - "INT", - 4 - ], - [ - "comp", - "TINYINT", - 1 - ], - [ - "precision", - "VARCHAR", - 2 - ], [ "status", "VARCHAR", 10 - ], - [ - "retention", - "VARCHAR", - 60 - ], - [ - "single_stable", - "BOOL", - 1 - ], - [ - "cachemodel", - "VARCHAR", - 11 - ], - [ - "cachesize", - "INT", - 4 - ], - [ - "wal_level", - "TINYINT", - 1 - ], - [ - "wal_fsync_period", - "INT", - 4 - ], - [ - "wal_retention_period", - "INT", - 4 - ], - [ - "wal_retention_size", - "BIGINT", - 8 - ], - [ - "wal_roll_period", - "INT", - 4 - ], - [ - "wal_seg_size", - "BIGINT", - 8 ] ], "data": [ [ "information_schema", - null, - null, - 14, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null, - "ready", - null, - null, - null, - null, - null, - null, - null, - null, - null, - null + 16, + "ready" ], [ "performance_schema", - null, - null, - 3, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null, - "ready", - null, - null, - null, - null, - null, - null, - null, - null, - null, - null + 9, + "ready" ] ], "rows": 2 @@ -231,21 +74,21 @@ http://:/rest/sql/[db_name] 参数说明: -- fqnd: 集群中的任一台主机 FQDN 或 IP 地址 -- port: 配置文件中 httpPort 配置项,缺省为 6041 +- fqnd: 集群中的任一台主机 FQDN 或 IP 地址。 +- port: 配置文件中 httpPort 配置项,缺省为 6041。 - db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。 例如:`http://h1.taos.com:6041/rest/sql/test` 是指向地址为 `h1.taos.com:6041` 的 URL,并将默认使用的数据库库名设置为 `test`。 HTTP 请求的 Header 里需带有身份认证信息,TDengine 支持 Basic 认证与自定义认证两种机制,后续版本将提供标准安全的数字签名机制来做身份验证。 -- [自定义身份认证信息](#自定义授权码)如下所示 +- [自定义身份认证信息](#自定义授权码)如下所示: ```text Authorization: Taosd ``` -- Basic 身份认证信息如下所示 +- Basic 身份认证信息如下所示: ```text Authorization: Basic @@ -259,13 +102,13 @@ HTTP 请求的 BODY 里就是一个完整的 SQL 语句,SQL 语句中的数据 curl -L -H "Authorization: Basic " -d "" :/rest/sql/[db_name] ``` -或者 +或者, ```bash curl -L -u username:password -d "" :/rest/sql/[db_name] ``` -其中,`TOKEN` 为 `{username}:{password}` 经过 Base64 编码之后的字符串,例如 `root:taosdata` 编码后为 `cm9vdDp0YW9zZGF0YQ==` +其中,`TOKEN` 为 `{username}:{password}` 经过 Base64 编码之后的字符串,例如 `root:taosdata` 编码后为 `cm9vdDp0YW9zZGF0YQ==`。 ## HTTP 返回格式 @@ -282,27 +125,9 @@ curl -L -u username:password -d "" :/rest/sql/[db_name] ### HTTP body 结构 -
    - - - - - - - - - - - - - - - - - - - - -
    执行结果说明样例
    正确执行 - code:(int)0 代表成功 -
    -
    - column_meta:([][3]any)列信息,每个列会用三个值来说明,分别为:列名(string)、列类型(string)、类型长度(int) -
    -
    - rows:(int)数据返回行数 -
    -
    - data:([][]any)具体数据内容 -
    +#### 正确执行 + +样例: ```json { @@ -313,23 +138,16 @@ curl -L -u username:password -d "" :/rest/sql/[db_name] } ``` -
    正确查询 - code:(int)0 代表成功 -
    -
    - column_meta:([][3]any) 列信息,每个列会用三个值来说明,分别为:列名(string)、列类型(string)、类型长度(int) -
    -
    - rows:(int)数据返回行数 -
    -
    - data:([][]any)具体数据内容 -
    +说明: + +- code:(`int`)0 代表成功。 +- column_meta:(`[1][3]any`)只返回 `[["affected_rows", "INT", 4]]`。 +- rows:(`int`)只返回 `1`。 +- data:(`[][]any`)返回受影响行数。 + +#### 正确查询 + +样例: ```json { @@ -385,17 +203,35 @@ curl -L -u username:password -d "" :/rest/sql/[db_name] } ``` -
    错误 - code:(int)错误码 -
    -
    - desc:(string)错误描述 -
    +说明: + +- code:(`int`)0 代表成功。 +- column_meta:(`[][3]any`) 列信息,每个列会用三个值来说明,分别为:列名(string)、列类型(string)、类型长度(int)。 +- rows:(`int`)数据返回行数。 +- data:(`[][]any`)具体数据内容(时间格式仅支持 RFC3339,结果集为 0 时区)。 + +列类型使用如下字符串: + +- "NULL" +- "BOOL" +- "TINYINT" +- "SMALLINT" +- "INT" +- "BIGINT" +- "FLOAT" +- "DOUBLE" +- "VARCHAR" +- "TIMESTAMP" +- "NCHAR" +- "TINYINT UNSIGNED" +- "SMALLINT UNSIGNED" +- "INT UNSIGNED" +- "BIGINT UNSIGNED" +- "JSON" + +#### 错误 + +样例: ```json { @@ -404,30 +240,10 @@ curl -L -u username:password -d "" :/rest/sql/[db_name] } ``` -
    - -### 说明 - -- 时间格式仅支持 RFC3339,结果集为 0 时区 -- 列类型使用如下字符串: - > "NULL" - > "BOOL" - > "TINYINT" - > "SMALLINT" - > "INT" - > "BIGINT" - > "FLOAT" - > "DOUBLE" - > "VARCHAR" - > "TIMESTAMP" - > "NCHAR" - > "TINYINT UNSIGNED" - > "SMALLINT UNSIGNED" - > "INT UNSIGNED" - > "BIGINT UNSIGNED" - > "JSON" +说明: + +- code:(`int`)错误码。 +- desc:(`string`)错误描述。 ## 自定义授权码 @@ -439,11 +255,9 @@ curl http://:/rest/login// 其中,`fqdn` 是 TDengine 数据库的 FQDN 或 IP 地址,`port` 是 TDengine 服务的端口号,`username` 为数据库用户名,`password` 为数据库密码,返回值为 JSON 格式,各字段含义如下: -- status:请求结果的标志位 - -- code:返回值代码 - -- desc:授权码 +- status:请求结果的标志位。 +- code:返回值代码。 +- desc:授权码。 获取授权码示例: diff --git a/docs/zh/14-reference/03-connector/cpp.mdx b/docs/zh/08-connector/03-cpp.mdx similarity index 85% rename from docs/zh/14-reference/03-connector/cpp.mdx rename to docs/zh/08-connector/03-cpp.mdx index 3a8367ef3351fd049e10cea706beb2e07cc4aece..c0bd33f12964537699849e35644a8c04e0f716f0 100644 --- a/docs/zh/14-reference/03-connector/cpp.mdx +++ b/docs/zh/08-connector/03-cpp.mdx @@ -1,5 +1,4 @@ --- -sidebar_position: 1 sidebar_label: C/C++ title: C/C++ Connector --- @@ -22,7 +21,7 @@ TDengine 客户端驱动的动态库位于: ## 支持的平台 -请参考[支持的平台列表](/reference/connector#支持的平台) +请参考[支持的平台列表](../#支持的平台) ## 支持的版本 @@ -30,7 +29,7 @@ TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一 ## 安装步骤 -TDengine 客户端驱动的安装请参考 [安装指南](/reference/connector#安装步骤) +TDengine 客户端驱动的安装请参考 [安装指南](../#安装步骤) ## 建立连接 @@ -404,47 +403,3 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多 **支持版本** 该功能接口从 2.3.0.0 版本开始支持。 - -### 订阅和消费 API - -订阅 API 目前支持订阅一张或多张表,并通过定期轮询的方式不断获取写入表中的最新数据。 - -- `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)` - - 该函数负责启动订阅服务,成功时返回订阅对象,失败时返回 `NULL`,其参数为: - - - taos:已经建立好的数据库连接 - - restart:如果订阅已经存在,是重新开始,还是继续之前的订阅 - - topic:订阅的主题(即名称),此参数是订阅的唯一标识 - - sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据 - - fp:收到查询结果时的回调函数(稍后介绍函数原型),只在异步调用时使用,同步调用时此参数应该传 `NULL` - - param:调用回调函数时的附加参数,系统 API 将其原样传递到回调函数,不进行任何处理 - - interval:轮询周期,单位为毫秒。异步调用时,将根据此参数周期性的调用回调函数,为避免对系统性能造成影响,不建议将此参数设置的过小;同步调用时,如两次调用 `taos_consume()` 的间隔小于此周期,API 将会阻塞,直到时间间隔超过此周期。 - -- `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)` - - 异步模式下,回调函数的原型,其参数为: - - - tsub:订阅对象 - - res:查询结果集,注意结果集中可能没有记录 - - param:调用 `taos_subscribe()` 时客户程序提供的附加参数 - - code:错误码 - - :::note - 在这个回调函数里不可以做耗时过长的处理,尤其是对于返回的结果集中数据较多的情况,否则有可能导致客户端阻塞等异常状态。如果必须进行复杂计算,则建议在另外的线程中进行处理。 - - ::: - -- `TAOS_RES *taos_consume(TAOS_SUB *tsub)` - - 同步模式下,该函数用来获取订阅的结果。 用户应用程序将其置于一个循环之中。 如两次调用 `taos_consume()` 的间隔小于订阅的轮询周期,API 将会阻塞,直到时间间隔超过此周期。如果数据库有新记录到达,该 API 将返回该最新的记录,否则返回一个没有记录的空结果集。 如果返回值为 `NULL`,说明系统出错。 异步模式下,用户程序不应调用此 API。 - - :::note - 在调用 `taos_consume()` 之后,用户应用应确保尽快调用 `taos_fetch_row()` 或 `taos_fetch_block()` 来处理订阅结果,否则服务端会持续缓存查询结果数据等待客户端读取,极端情况下会导致服务端内存消耗殆尽,影响服务稳定性。 - - ::: - -- `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)` - - 取消订阅。 如参数 `keepProgress` 不为 0,API 会保留订阅的进度信息,后续调用 `taos_subscribe()` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。 - diff --git a/docs/zh/14-reference/03-connector/java.mdx b/docs/zh/08-connector/04-java.mdx similarity index 94% rename from docs/zh/14-reference/03-connector/java.mdx rename to docs/zh/08-connector/04-java.mdx index e33d09c1ce69e0c96dedac198b73f425c531b4cc..6b1715f8c6a2f949fca552885ea3920f43e8a849 100644 --- a/docs/zh/14-reference/03-connector/java.mdx +++ b/docs/zh/08-connector/04-java.mdx @@ -1,6 +1,5 @@ --- toc_max_heading_level: 4 -sidebar_position: 2 sidebar_label: Java title: TDengine Java Connector description: TDengine Java 连接器基于标准 JDBC API 实现, 并提供原生连接与 REST连接两种连接器。 @@ -35,7 +34,7 @@ REST 连接支持所有能运行 Java 的平台。 ## 版本支持 -请参考[版本支持列表](/reference/connector#版本支持) +请参考[版本支持列表](../#版本支持) ## TDengine DataType 和 Java DataType @@ -64,7 +63,7 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对 使用 Java Connector 连接数据库前,需要具备以下条件: - 已安装 Java 1.8 或以上版本运行时环境和 Maven 3.6 或以上版本 -- 已安装 TDengine 客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装),具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动) +- 已安装 TDengine 客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装),具体步骤请参考[安装客户端驱动](../#安装客户端驱动) ### 安装连接器 @@ -83,7 +82,7 @@ Maven 项目中,在 pom.xml 中添加以下依赖: com.taosdata.jdbc taos-jdbcdriver - 2.0.** + 3.0.0 ``` @@ -93,12 +92,12 @@ Maven 项目中,在 pom.xml 中添加以下依赖: 可以通过下载 TDengine 的源码,自己编译最新版本的 Java connector ```shell -git clone https://github.com/taosdata/taos-connector-jdbc.git --branch 2.0 +git clone https://github.com/taosdata/taos-connector-jdbc.git cd taos-connector-jdbc mvn clean install -Dmaven.test.skip=true ``` -编译后,在 target 目录下会产生 taos-jdbcdriver-2.0.XX-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。 +编译后,在 target 目录下会产生 taos-jdbcdriver-3.0.*-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。 @@ -131,7 +130,7 @@ url 中的配置参数如下: - charset:客户端使用的字符集,默认值为系统字符集。 - locale:客户端语言环境,默认值系统当前 locale。 - timezone:客户端使用的时区,默认值为系统当前时区。 -- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。开启批量拉取同时获取一批数据在查询数据量较大时批量拉取可以有效的提升查询性能。 +- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:true。开启批量拉取同时获取一批数据在查询数据量较大时批量拉取可以有效的提升查询性能。 - batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败将继续执行下面的 SQL。false:不再执行失败 SQL 后的任何语句。默认值为:false。 JDBC 原生连接的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。 @@ -198,7 +197,7 @@ url 中的配置参数如下: - user:登录 TDengine 用户名,默认值 'root'。 - password:用户登录密码,默认值 'taosdata'。 -- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。从 taos-jdbcdriver-2.0.38 开始,JDBC REST 连接增加批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。 +- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。JDBC REST 连接支持批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。 - charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。 - batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行下面的 SQL 了。false:不再执行失败 SQL 后的任何语句。默认值为:false。 - httpConnectTimeout: 连接超时时间,单位 ms, 默认值为 5000。 @@ -216,7 +215,7 @@ url 中的配置参数如下: INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6); ``` -- 从 taos-jdbcdriver-2.0.36 开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6); +- 如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6); ::: @@ -230,7 +229,7 @@ INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFra **注意**: - 应用中设置的 client parameter 为进程级别的,即如果要更新 client 的参数,需要重启应用。这是因为 client parameter 是全局参数,仅在应用程序的第一次设置生效。 -- 以下示例代码基于 taos-jdbcdriver-2.0.36。 +- 以下示例代码基于 taos-jdbcdriver-3.0.0。 ```java public Connection getConn() throws Exception{ @@ -367,7 +366,7 @@ TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据 **注意**: - JDBC REST 连接目前不支持参数绑定 -- 以下示例代码基于 taos-jdbcdriver-2.0.36 +- 以下示例代码基于 taos-jdbcdriver-3.0.0 - binary 类型数据需要调用 setString 方法,nchar 类型数据需要调用 setNString 方法 - setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽 @@ -630,12 +629,12 @@ public void setNString(int columnIndex, ArrayList list, int size) throws ### 无模式写入 -TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](../../schemaless)。 +TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](../../reference/schemaless/)。 **注意**: - JDBC REST 连接目前不支持无模式写入 -- 以下示例代码基于 taos-jdbcdriver-2.0.36 +- 以下示例代码基于 taos-jdbcdriver-3.0.0 ```java public class SchemalessInsertTest { @@ -666,7 +665,7 @@ public class SchemalessInsertTest { } ``` -### 订阅 +### 数据订阅 TDengine Java 连接器支持订阅功能,应用 API 如下: @@ -712,14 +711,19 @@ while(true) { } ``` -`poll` 方法返回一个结果集,其中包含从上次 `poll` 到目前为止的所有新数据。请务必按需选择合理的调用 `poll` 的频率(如例子中的 `Duration.ofMillis(100)`),否则会给服务端造成不必要的压力。 +`poll` 每次调用获取一个消息。 #### 关闭订阅 ```java +// 取消订阅 +consumer.unsubscribe(); +// 关闭消费 consumer.close() ``` +详情请参考:[数据订阅](../../../develop/tmq) + ### 使用示例如下: ```java @@ -734,7 +738,7 @@ public abstract class ConsumerLoop { config.setProperty("msg.with.table.name", "true"); config.setProperty("enable.auto.commit", "true"); config.setProperty("group.id", "group1"); - config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer"); + config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer"); this.consumer = new TaosConsumer<>(config); this.topics = Collections.singletonList("topic_speed"); @@ -754,8 +758,9 @@ public abstract class ConsumerLoop { process(record); } } + consumer.unsubscribe(); } finally { - consumer.close(); + consumer.close(); shutdownLatch.countDown(); } } @@ -765,11 +770,11 @@ public abstract class ConsumerLoop { shutdownLatch.await(); } - static class ResultDeserializer extends ReferenceDeserializer { + public static class ResultDeserializer extends ReferenceDeserializer { } - static class ResultBean { + public static class ResultBean { private Timestamp ts; private int speed; @@ -875,6 +880,7 @@ public static void main(String[] args) throws Exception { | taos-jdbcdriver 版本 | 主要变化 | | :------------------: | :----------------------------: | +| 3.0.0 | 支持 TDengine 3.0 | | 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 | | 2.0.38 | JDBC REST 连接增加批量拉取功能 | | 2.0.37 | 增加对 json tag 支持 | @@ -900,7 +906,13 @@ public static void main(String[] args) throws Exception { **解决方法**:重新安装 64 位 JDK。 -4. 其它问题请参考 [FAQ](../../../train-faq/faq) +4. java.lang.NoSuchMethodError: setByteArray + + **原因**:taos-jdbcdriver 3.* 版本仅支持 TDengine 3.0 及以上版本。 + + **解决方法**: 使用 taos-jdbcdriver 2.* 版本连接 TDengine 2.* 版本。 + +其它问题请参考 [FAQ](../../../train-faq/faq) ## API 参考 diff --git a/docs/zh/14-reference/03-connector/go.mdx b/docs/zh/08-connector/05-go.mdx similarity index 95% rename from docs/zh/14-reference/03-connector/go.mdx rename to docs/zh/08-connector/05-go.mdx index a87c948d4a3c0e0764e6c4823608bf7d8b171f24..9d30f75190cddbb17c40e97655002a158cd6aae6 100644 --- a/docs/zh/14-reference/03-connector/go.mdx +++ b/docs/zh/08-connector/05-go.mdx @@ -9,11 +9,11 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import Preparition from "./_preparition.mdx" -import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx" -import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx" -import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx" -import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx" -import GoQuery from "../../07-develop/04-query-data/_go.mdx" +import GoInsert from "../07-develop/03-insert-data/_go_sql.mdx" +import GoInfluxLine from "../07-develop/03-insert-data/_go_line.mdx" +import GoOpenTSDBTelnet from "../07-develop/03-insert-data/_go_opts_telnet.mdx" +import GoOpenTSDBJson from "../07-develop/03-insert-data/_go_opts_json.mdx" +import GoQuery from "../07-develop/04-query-data/_go.mdx" `driver-go` 是 TDengine 的官方 Go 语言连接器,实现了 Go 语言[ database/sql ](https://golang.org/pkg/database/sql/) 包的接口。Go 开发人员可以通过它开发存取 TDengine 集群数据的应用软件。 @@ -30,7 +30,7 @@ REST 连接支持所有能运行 Go 的平台。 ## 版本支持 -请参考[版本支持列表](/reference/connector#版本支持) +请参考[版本支持列表](../#版本支持) ## 支持的功能特性 @@ -56,7 +56,7 @@ REST 连接支持所有能运行 Go 的平台。 ### 安装前准备 * 安装 Go 开发环境(Go 1.14 及以上,GCC 4.8.5 及以上) -* 如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动) +* 如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动) 配置好环境变量,检查命令: diff --git a/docs/zh/14-reference/03-connector/rust.mdx b/docs/zh/08-connector/06-rust.mdx similarity index 97% rename from docs/zh/14-reference/03-connector/rust.mdx rename to docs/zh/08-connector/06-rust.mdx index ae644e191166e244ae42373aeef2cbbacbe9e0e1..26f53c82d630fda168dd98b4c8ec993afc5e3a1d 100644 --- a/docs/zh/14-reference/03-connector/rust.mdx +++ b/docs/zh/08-connector/06-rust.mdx @@ -1,6 +1,5 @@ --- toc_max_heading_level: 4 -sidebar_position: 5 sidebar_label: Rust title: TDengine Rust Connector --- @@ -9,9 +8,9 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import Preparition from "./_preparition.mdx" -import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx" -import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx" -import RustQuery from "../../07-develop/04-query-data/_rust.mdx" +import RustInsert from "../07-develop/03-insert-data/_rust_sql.mdx" +import RustBind from "../07-develop/03-insert-data/_rust_stmt.mdx" +import RustQuery from "../07-develop/04-query-data/_rust.mdx" [![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos) @@ -28,7 +27,7 @@ Websocket 连接支持所有能运行 Rust 的平台。 ## 版本支持 -请参考[版本支持列表](/reference/connector#版本支持) +请参考[版本支持列表](../#版本支持) Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容。建议使用 3.0 版本以上的 TDengine,以避免已知问题。 @@ -37,7 +36,7 @@ Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容 ### 安装前准备 * 安装 Rust 开发工具链 -* 如果使用原生连接,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动) +* 如果使用原生连接,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动) ### 添加 taos 依赖 diff --git a/docs/zh/14-reference/03-connector/python.mdx b/docs/zh/08-connector/07-python.mdx similarity index 95% rename from docs/zh/14-reference/03-connector/python.mdx rename to docs/zh/08-connector/07-python.mdx index d7b17dc74a6d62da3adfd1a10d8d62a9570226a1..0242486d3b8820ac38301d38ccbaf8bb9fc7e1c3 100644 --- a/docs/zh/14-reference/03-connector/python.mdx +++ b/docs/zh/08-connector/07-python.mdx @@ -1,5 +1,4 @@ --- -sidebar_position: 3 sidebar_label: Python title: TDengine Python Connector description: "taospy 是 TDengine 的官方 Python 连接器。taospy 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。tasopy 对 TDengine 的原生接口和 REST 接口都进行了封装, 分别对应 tasopy 的两个子模块:tasos 和 taosrest。除了对原生接口和 REST 接口的封装,taospy 还提供了符合 Python 数据访问规范(PEP 249)的编程接口。这使得 taospy 和很多第三方工具集成变得简单,比如 SQLAlchemy 和 pandas" @@ -8,7 +7,7 @@ description: "taospy 是 TDengine 的官方 Python 连接器。taospy 提供了 import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -`taospy` 是 TDengine 的官方 Python 连接器。`taospy` 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。`taospy` 对 TDengine 的[原生接口](/reference/connector/cpp)和 [REST 接口](/reference/rest-api)都进行了封装, 分别对应 `taospy` 包的 `taos` 模块 和 `taosrest` 模块。 +`taospy` 是 TDengine 的官方 Python 连接器。`taospy` 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。`taospy` 对 TDengine 的[原生接口](../cpp)和 [REST 接口](../rest-api)都进行了封装, 分别对应 `taospy` 包的 `taos` 模块 和 `taosrest` 模块。 除了对原生接口和 REST 接口的封装,`taospy` 还提供了符合 [Python 数据访问规范(PEP 249)](https://peps.python.org/pep-0249/) 的编程接口。这使得 `taospy` 和很多第三方工具集成变得简单,比如 [SQLAlchemy](https://www.sqlalchemy.org/) 和 [pandas](https://pandas.pydata.org/)。 使用客户端驱动提供的原生接口直接与服务端建立的连接的方式下文中称为“原生连接”;使用 taosAdapter 提供的 REST 接口与服务端建立的连接的方式下文中称为“REST 连接”。 @@ -17,7 +16,7 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con ## 支持的平台 -- 原生连接[支持的平台](/reference/connector/#支持的平台)和 TDengine 客户端支持的平台一致。 +- 原生连接[支持的平台](../#支持的平台)和 TDengine 客户端支持的平台一致。 - REST 连接支持所有能运行 Python 的平台。 ## 版本选择 @@ -150,10 +149,19 @@ curl -u root:taosdata http://:/rest/sql -d "select server_version()" ```json { - "status": "succ", - "head": ["server_version()"], - "column_meta": [["server_version()", 8, 8]], - "data": [["2.4.0.16"]], + "code": 0, + "column_meta": [ + [ + "server_version()", + "VARCHAR", + 7 + ] + ], + "data": [ + [ + "3.0.0.0" + ] + ], "rows": 1 } ``` @@ -266,7 +274,7 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线 ##### RestClient 类的使用 -`RestClient` 类是对于 [REST API](/reference/rest-api) 的直接封装。它只包含一个 `sql()` 方法用于执行任意 SQL 语句, 并返回执行结果。 +`RestClient` 类是对于 [REST API](../rest-api) 的直接封装。它只包含一个 `sql()` 方法用于执行任意 SQL 语句, 并返回执行结果。 ```python title="RestClient 的使用" {{#include docs/examples/python/rest_client_example.py}} diff --git a/docs/zh/14-reference/03-connector/node.mdx b/docs/zh/08-connector/08-node.mdx similarity index 92% rename from docs/zh/14-reference/03-connector/node.mdx rename to docs/zh/08-connector/08-node.mdx index b089da99d26d0d671641fd0b50119853a04000a9..167ae069d6175873679e8c7cc4ecbb16dafe2ad8 100644 --- a/docs/zh/14-reference/03-connector/node.mdx +++ b/docs/zh/08-connector/08-node.mdx @@ -1,6 +1,5 @@ --- toc_max_heading_level: 4 -sidebar_position: 6 sidebar_label: Node.js title: TDengine Node.js Connector --- @@ -9,11 +8,11 @@ import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; import Preparition from "./_preparition.mdx"; -import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx"; -import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx"; -import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx"; -import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx"; -import NodeQuery from "../../07-develop/04-query-data/_js.mdx"; +import NodeInsert from "../07-develop/03-insert-data/_js_sql.mdx"; +import NodeInfluxLine from "../07-develop/03-insert-data/_js_line.mdx"; +import NodeOpenTSDBTelnet from "../07-develop/03-insert-data/_js_opts_telnet.mdx"; +import NodeOpenTSDBJson from "../07-develop/03-insert-data/_js_opts_json.mdx"; +import NodeQuery from "../07-develop/04-query-data/_js.mdx"; `@tdengine/client` 和 `@tdengine/rest` 是 TDengine 的官方 Node.js 语言连接器。 Node.js 开发人员可以通过它开发可以存取 TDengine 集群数据的应用软件。注意:从 TDengine 3.0 开始 Node.js 原生连接器的包名由 `td2.0-connector` 改名为 `@tdengine/client` 而 rest 连接器的包名由 `td2.0-rest-connector` 改为 `@tdengine/rest`。并且不与 TDengine 2.x 兼容。 @@ -28,7 +27,7 @@ REST 连接器支持所有能运行 Node.js 的平台。 ## 版本支持 -请参考[版本支持列表](/reference/connector#版本支持) +请参考[版本支持列表](../#版本支持) ## 支持的功能特性 @@ -52,7 +51,7 @@ REST 连接器支持所有能运行 Node.js 的平台。 ### 安装前准备 - 安装 Node.js 开发环境 -- 如果使用 REST 连接器,跳过此步。但如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)。我们使用 [node-gyp](https://github.com/nodejs/node-gyp) 和 TDengine 实例进行交互,还需要根据具体操作系统来安装下文提到的一些依赖工具。 +- 如果使用 REST 连接器,跳过此步。但如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)。我们使用 [node-gyp](https://github.com/nodejs/node-gyp) 和 TDengine 实例进行交互,还需要根据具体操作系统来安装下文提到的一些依赖工具。 diff --git a/docs/zh/14-reference/03-connector/csharp.mdx b/docs/zh/08-connector/09-csharp.mdx similarity index 90% rename from docs/zh/14-reference/03-connector/csharp.mdx rename to docs/zh/08-connector/09-csharp.mdx index 723c12932b410e9f85a0f35cd0c0b8273f4f7723..be27bfb685d5890813aa65199813f021f7e92066 100644 --- a/docs/zh/14-reference/03-connector/csharp.mdx +++ b/docs/zh/08-connector/09-csharp.mdx @@ -1,6 +1,5 @@ --- toc_max_heading_level: 4 -sidebar_position: 7 sidebar_label: C# title: C# Connector --- @@ -9,16 +8,16 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import Preparition from "./_preparition.mdx" -import CSInsert from "../../07-develop/03-insert-data/_cs_sql.mdx" -import CSInfluxLine from "../../07-develop/03-insert-data/_cs_line.mdx" -import CSOpenTSDBTelnet from "../../07-develop/03-insert-data/_cs_opts_telnet.mdx" -import CSOpenTSDBJson from "../../07-develop/03-insert-data/_cs_opts_json.mdx" -import CSQuery from "../../07-develop/04-query-data/_cs.mdx" -import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx" +import CSInsert from "../07-develop/03-insert-data/_cs_sql.mdx" +import CSInfluxLine from "../07-develop/03-insert-data/_cs_line.mdx" +import CSOpenTSDBTelnet from "../07-develop/03-insert-data/_cs_opts_telnet.mdx" +import CSOpenTSDBJson from "../07-develop/03-insert-data/_cs_opts_json.mdx" +import CSQuery from "../07-develop/04-query-data/_cs.mdx" +import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx" `TDengine.Connector` 是 TDengine 提供的 C# 语言连接器。C# 开发人员可以通过它开发存取 TDengine 集群数据的 C# 应用软件。 -`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST API](/reference/rest-api/) 文档自行编写。 +`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST API](../rest-api/) 文档自行编写。 本文介绍如何在 Linux 或 Windows 环境中安装 `TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。 @@ -32,7 +31,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx" ## 版本支持 -请参考[版本支持列表](/reference/connector#版本支持) +请参考[版本支持列表](../#版本支持) ## 支持的功能特性 @@ -49,7 +48,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx" * 安装 [.NET SDK](https://dotnet.microsoft.com/download) * [Nuget 客户端](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (可选安装) -* 安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动) +* 安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动) ### 使用 dotnet CLI 安装 diff --git a/docs/zh/14-reference/03-connector/php.mdx b/docs/zh/08-connector/10-php.mdx similarity index 95% rename from docs/zh/14-reference/03-connector/php.mdx rename to docs/zh/08-connector/10-php.mdx index 2b7ff2a6febd162fe34ebb737d2f33fbd9fc58a2..5e32c709de89d69b8602b506a9c774cb0a0244f0 100644 --- a/docs/zh/14-reference/03-connector/php.mdx +++ b/docs/zh/08-connector/10-php.mdx @@ -1,5 +1,4 @@ --- -sidebar_position: 1 sidebar_label: PHP title: PHP Connector --- @@ -38,7 +37,7 @@ TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一 ### 安装 TDengine 客户端驱动 -TDengine 客户端驱动的安装请参考 [安装指南](/reference/connector#安装步骤) +TDengine 客户端驱动的安装请参考 [安装指南](../#安装步骤) ### 编译安装 php-tdengine @@ -61,7 +60,7 @@ phpize && ./configure && make -j && make install **手动指定 tdengine 目录:** ```shell -phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install +phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install ``` > `--with-tdengine-dir=` 后跟上 tdengine 目录。 diff --git a/docs/zh/14-reference/03-connector/_01-error-code.md b/docs/zh/08-connector/_01-error-code.md similarity index 99% rename from docs/zh/14-reference/03-connector/_01-error-code.md rename to docs/zh/08-connector/_01-error-code.md index 53e006e108543805232c8195474f2afd793e7332..3111d4bbf8a071500052309f2e3643f494c1be9a 100644 --- a/docs/zh/14-reference/03-connector/_01-error-code.md +++ b/docs/zh/08-connector/_01-error-code.md @@ -1,6 +1,7 @@ --- sidebar_label: 错误码 title: TDengine C/C++ 连接器错误码 +description: C/C++ 连接器的错误码列表和详细说明 --- 本文中详细列举了在使用 TDengine C/C++ 连接器时客户端可能得到的错误码以及所要采取的相应动作。其它语言的连接器在使用原生连接方式时也会所得到的返回码返回给连接器的调用者。 diff --git a/docs/zh/14-reference/03-connector/_category_.yml b/docs/zh/08-connector/_category_.yml similarity index 100% rename from docs/zh/14-reference/03-connector/_category_.yml rename to docs/zh/08-connector/_category_.yml diff --git a/docs/zh/14-reference/03-connector/_linux_install.mdx b/docs/zh/08-connector/_linux_install.mdx similarity index 89% rename from docs/zh/14-reference/03-connector/_linux_install.mdx rename to docs/zh/08-connector/_linux_install.mdx index eb7f68328809fdf75ce11a3bddb324b59bfe8dcb..0b1f415f54d28e65a519d35ab94a84b3aa8338ba 100644 --- a/docs/zh/14-reference/03-connector/_linux_install.mdx +++ b/docs/zh/08-connector/_linux_install.mdx @@ -1,10 +1,10 @@ -import PkgList from "/components/PkgList"; +import PkgListV3 from "/components/PkgListV3"; 1. 下载客户端安装包 - + - [所有下载](https://www.taosdata.com/cn/all-downloads/) + [所有下载](../../releases/tdengine) 2. 解压缩软件包 diff --git a/docs/zh/08-connector/_preparition.mdx b/docs/zh/08-connector/_preparition.mdx new file mode 100644 index 0000000000000000000000000000000000000000..87538ebfd8c60507aec90ee86e427d85979dbc4a --- /dev/null +++ b/docs/zh/08-connector/_preparition.mdx @@ -0,0 +1,10 @@ +- 已安装客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装) + +:::info + +由于 TDengine 的客户端驱动使用 C 语言编写,使用原生连接时需要加载系统对应安装在本地的客户端驱动共享库文件,通常包含在 TDengine 安装包。TDengine Linux 服务端安装包附带了 TDengine 客户端,也可以单独安装 [Linux 客户端](/get-started/) 。在 Windows 环境开发时需要安装 TDengine 对应的 [Windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client) 。 + +- libtaos.so: 在 Linux 系统中成功安装 TDengine 后,依赖的 Linux 版客户端驱动 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。 +- taos.dll: 在 Windows 系统中安装完客户端之后,依赖的 Windows 版客户端驱动 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。 + +::: diff --git a/docs/zh/14-reference/03-connector/_verify_linux.mdx b/docs/zh/08-connector/_verify_linux.mdx similarity index 100% rename from docs/zh/14-reference/03-connector/_verify_linux.mdx rename to docs/zh/08-connector/_verify_linux.mdx diff --git a/docs/zh/14-reference/03-connector/_verify_windows.mdx b/docs/zh/08-connector/_verify_windows.mdx similarity index 100% rename from docs/zh/14-reference/03-connector/_verify_windows.mdx rename to docs/zh/08-connector/_verify_windows.mdx diff --git a/docs/zh/14-reference/03-connector/_windows_install.mdx b/docs/zh/08-connector/_windows_install.mdx similarity index 87% rename from docs/zh/14-reference/03-connector/_windows_install.mdx rename to docs/zh/08-connector/_windows_install.mdx index 755f96b2d728621de5752ce752e5d249cda0f6d9..3cd688e615fe8bcf1aa472506754366f3a6db011 100644 --- a/docs/zh/14-reference/03-connector/_windows_install.mdx +++ b/docs/zh/08-connector/_windows_install.mdx @@ -1,11 +1,11 @@ -import PkgList from "/components/PkgList"; +import PkgListV3 from "/components/PkgListV3"; 1. 下载客户端安装包 - - - [所有下载](https://www.taosdata.com/cn/all-downloads/) + + [所有下载](../../releases/tdengine) + 2. 执行安装程序,按提示选择默认值,完成安装 3. 安装路径 diff --git a/docs/zh/14-reference/03-connector/connector.webp b/docs/zh/08-connector/connector.webp similarity index 100% rename from docs/zh/14-reference/03-connector/connector.webp rename to docs/zh/08-connector/connector.webp diff --git a/docs/zh/14-reference/03-connector/03-connector.mdx b/docs/zh/08-connector/index.md similarity index 98% rename from docs/zh/14-reference/03-connector/03-connector.mdx rename to docs/zh/08-connector/index.md index bdad0b7e25a3a94fa34b14bf47403ba2afd7db8d..17de8e926cd9a3633dc8746b0fb49c38ff8ca61f 100644 --- a/docs/zh/14-reference/03-connector/03-connector.mdx +++ b/docs/zh/08-connector/index.md @@ -1,5 +1,7 @@ --- +sidebar_label: 连接器 title: 连接器 +description: 详细介绍各种语言的连接器及 REST API --- TDengine 提供了丰富的应用程序开发接口,为了便于用户快速开发自己的应用,TDengine 支持了多种编程语言的连接器,其中官方连接器包括支持 C/C++、Java、Python、Go、Node.js、C# 和 Rust 的连接器。这些连接器支持使用原生接口(taosc)和 REST 接口(部分语言暂不支持)连接 TDengine 集群。社区开发者也贡献了多个非官方连接器,例如 ADO.NET 连接器、Lua 连接器和 PHP 连接器。 diff --git a/docs/zh/14-reference/03-connector/tdengine-jdbc-connector.webp b/docs/zh/08-connector/tdengine-jdbc-connector.webp similarity index 100% rename from docs/zh/14-reference/03-connector/tdengine-jdbc-connector.webp rename to docs/zh/08-connector/tdengine-jdbc-connector.webp diff --git a/docs/zh/10-deployment/01-deploy.md b/docs/zh/10-deployment/01-deploy.md index 22a9c2ff8e68880ce5b0be2e01924eca12707a37..03b4ce30f980cd77e9845076ce9bb35c4474f948 100644 --- a/docs/zh/10-deployment/01-deploy.md +++ b/docs/zh/10-deployment/01-deploy.md @@ -1,6 +1,7 @@ --- sidebar_label: 手动部署 title: 集群部署和管理 +description: 使用命令行工具手动部署 TDengine 集群 --- ## 准备工作 @@ -70,7 +71,7 @@ serverPort 6030 ## 启动集群 -按照《立即开始》里的步骤,启动第一个数据节点,例如 h1.taosdata.com,然后执行 taos,启动 taos shell,从 shell 里执行命令“SHOW DNODES”,如下所示: +按照《立即开始》里的步骤,启动第一个数据节点,例如 h1.taosdata.com,然后执行 taos,启动 TDengine CLI,在其中执行命令 “SHOW DNODES”,如下所示: ``` taos> show dnodes; @@ -114,7 +115,7 @@ SHOW DNODES; 任何已经加入集群在线的数据节点,都可以作为后续待加入节点的 firstEp。 firstEp 这个参数仅仅在该数据节点首次加入集群时有作用,加入集群后,该数据节点会保存最新的 mnode 的 End Point 列表,不再依赖这个参数。 -接下来,配置文件中的 firstEp 参数就主要在客户端连接的时候使用了,例如 taos shell 如果不加参数,会默认连接由 firstEp 指定的节点。 +接下来,配置文件中的 firstEp 参数就主要在客户端连接的时候使用了,例如 TDengine CLI 如果不加参数,会默认连接由 firstEp 指定的节点。 两个没有配置 firstEp 参数的数据节点 dnode 启动后,会独立运行起来。这个时候,无法将其中一个数据节点加入到另外一个数据节点,形成集群。无法将两个独立的集群合并成为新的集群。 ::: diff --git a/docs/zh/10-deployment/03-k8s.md b/docs/zh/10-deployment/03-k8s.md index 396b8343243ba824dd87b83fd5f94c14c2059730..0cae59657c2a0199d3452bc37d36f2c537944d21 100644 --- a/docs/zh/10-deployment/03-k8s.md +++ b/docs/zh/10-deployment/03-k8s.md @@ -1,6 +1,7 @@ --- sidebar_label: Kubernetes title: 在 Kubernetes 上部署 TDengine 集群 +description: 利用 Kubernetes 部署 TDengine 集群的详细指南 --- 作为面向云原生架构设计的时序数据库,TDengine 支持 Kubernetes 部署。这里介绍如何使用 YAML 文件一步一步从头创建一个 TDengine 集群,并重点介绍 Kubernetes 环境下 TDengine 的常用操作。 @@ -9,6 +10,7 @@ title: 在 Kubernetes 上部署 TDengine 集群 要使用 Kubernetes 部署管理 TDengine 集群,需要做好如下准备工作。 +* 本文适用 Kubernetes v1.5 以上版本 * 本文和下一章使用 minikube、kubectl 和 helm 等工具进行安装部署,请提前安装好相应软件 * Kubernetes 已经安装部署并能正常访问使用或更新必要的容器仓库或其他服务 @@ -365,7 +367,7 @@ kubectl scale statefulsets tdengine --replicas=1 ``` -在 taos shell 中的所有数据库操作将无法成功。 +在 TDengine CLI 中的所有数据库操作将无法成功。 ``` taos> show dnodes; diff --git a/docs/zh/10-deployment/05-helm.md b/docs/zh/10-deployment/05-helm.md index 9a723ff62f23da4906ee268becef1d812c29d797..9a3b21f09296e6f5a8dbd089225b6580b9567586 100644 --- a/docs/zh/10-deployment/05-helm.md +++ b/docs/zh/10-deployment/05-helm.md @@ -1,6 +1,7 @@ --- sidebar_label: Helm title: 使用 Helm 部署 TDengine 集群 +description: 使用 Helm 部署 TDengine 集群的详细指南 --- Helm 是 Kubernetes 的包管理器,上一节使用 Kubernets 部署 TDengine 集群的操作已经足够简单,但 Helm 依然可以提供更强大的能力。 @@ -171,70 +172,19 @@ taoscfg: TAOS_REPLICA: "1" - # number of days per DB file - # TAOS_DAYS: "10" - - # number of days to keep DB file, default is 10 years. - #TAOS_KEEP: "3650" - - # cache block size (Mbyte) - #TAOS_CACHE: "16" - - # number of cache blocks per vnode - #TAOS_BLOCKS: "6" - - # minimum rows of records in file block - #TAOS_MIN_ROWS: "100" - - # maximum rows of records in file block - #TAOS_MAX_ROWS: "4096" - - # - # TAOS_NUM_OF_THREADS_PER_CORE: number of threads per CPU core - #TAOS_NUM_OF_THREADS_PER_CORE: "1.0" + # TAOS_NUM_OF_RPC_THREADS: number of threads for RPC + #TAOS_NUM_OF_RPC_THREADS: "2" # # TAOS_NUM_OF_COMMIT_THREADS: number of threads to commit cache data #TAOS_NUM_OF_COMMIT_THREADS: "4" - # - # TAOS_RATIO_OF_QUERY_CORES: - # the proportion of total CPU cores available for query processing - # 2.0: the query threads will be set to double of the CPU cores. - # 1.0: all CPU cores are available for query processing [default]. - # 0.5: only half of the CPU cores are available for query. - # 0.0: only one core available. - #TAOS_RATIO_OF_QUERY_CORES: "1.0" - - # - # TAOS_KEEP_COLUMN_NAME: - # the last_row/first/last aggregator will not change the original column name in the result fields - #TAOS_KEEP_COLUMN_NAME: "0" - - # enable/disable backuping vnode directory when removing vnode - #TAOS_VNODE_BAK: "1" - # enable/disable installation / usage report #TAOS_TELEMETRY_REPORTING: "1" - # enable/disable load balancing - #TAOS_BALANCE: "1" - - # max timer control blocks - #TAOS_MAX_TMR_CTRL: "512" - # time interval of system monitor, seconds #TAOS_MONITOR_INTERVAL: "30" - # number of seconds allowed for a dnode to be offline, for cluster only - #TAOS_OFFLINE_THRESHOLD: "8640000" - - # RPC re-try timer, millisecond - #TAOS_RPC_TIMER: "1000" - - # RPC maximum time for ack, seconds. - #TAOS_RPC_MAX_TIME: "600" - # time interval of dnode status reporting to mnode, seconds, for cluster only #TAOS_STATUS_INTERVAL: "1" @@ -245,37 +195,7 @@ taoscfg: #TAOS_MIN_SLIDING_TIME: "10" # minimum time window, milli-second - #TAOS_MIN_INTERVAL_TIME: "10" - - # maximum delay before launching a stream computation, milli-second - #TAOS_MAX_STREAM_COMP_DELAY: "20000" - - # maximum delay before launching a stream computation for the first time, milli-second - #TAOS_MAX_FIRST_STREAM_COMP_DELAY: "10000" - - # retry delay when a stream computation fails, milli-second - #TAOS_RETRY_STREAM_COMP_DELAY: "10" - - # the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9 - #TAOS_STREAM_COMP_DELAY_RATIO: "0.1" - - # max number of vgroups per db, 0 means configured automatically - #TAOS_MAX_VGROUPS_PER_DB: "0" - - # max number of tables per vnode - #TAOS_MAX_TABLES_PER_VNODE: "1000000" - - # the number of acknowledgments required for successful data writing - #TAOS_QUORUM: "1" - - # enable/disable compression - #TAOS_COMP: "2" - - # write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync - #TAOS_WAL_LEVEL: "1" - - # if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away - #TAOS_FSYNC: "3000" + #TAOS_MIN_INTERVAL_TIME: "1" # the compressed rpc message, option: # -1 (no compression) @@ -283,17 +203,8 @@ taoscfg: # > 0 (rpc message body which larger than this value will be compressed) #TAOS_COMPRESS_MSG_SIZE: "-1" - # max length of an SQL - #TAOS_MAX_SQL_LENGTH: "1048576" - - # the maximum number of records allowed for super table time sorting - #TAOS_MAX_NUM_OF_ORDERED_RES: "100000" - # max number of connections allowed in dnode - #TAOS_MAX_SHELL_CONNS: "5000" - - # max number of connections allowed in client - #TAOS_MAX_CONNECTIONS: "5000" + #TAOS_MAX_SHELL_CONNS: "50000" # stop writing logs when the disk size of the log folder is less than this value #TAOS_MINIMAL_LOG_DIR_G_B: "0.1" @@ -313,21 +224,8 @@ taoscfg: # enable/disable system monitor #TAOS_MONITOR: "1" - # enable/disable recording the SQL statements via restful interface - #TAOS_HTTP_ENABLE_RECORD_SQL: "0" - - # number of threads used to process http requests - #TAOS_HTTP_MAX_THREADS: "2" - - # maximum number of rows returned by the restful interface - #TAOS_RESTFUL_ROW_LIMIT: "10240" - - # The following parameter is used to limit the maximum number of lines in log files. - # max number of lines per log filters - # numOfLogLines 10000000 - # enable/disable async log - #TAOS_ASYNC_LOG: "0" + #TAOS_ASYNC_LOG: "1" # # time of keeping log files, days @@ -344,25 +242,8 @@ taoscfg: # debug flag for all log type, take effect when non-zero value\ #TAOS_DEBUG_FLAG: "143" - # enable/disable recording the SQL in taos client - #TAOS_ENABLE_RECORD_SQL: "0" - # generate core file when service crash #TAOS_ENABLE_CORE_FILE: "1" - - # maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden - #TAOS_MAX_BINARY_DISPLAY_WIDTH: "30" - - # enable/disable stream (continuous query) - #TAOS_STREAM: "1" - - # in retrieve blocking model, only in 50% query threads will be used in query processing in dnode - #TAOS_RETRIEVE_BLOCKING_MODEL: "0" - - # the maximum allowed query buffer size in MB during query processing for each data node - # -1 no limit (default) - # 0 no query allowed, queries are disabled - #TAOS_QUERY_BUFFER_SIZE: "-1" ``` ## 扩容 diff --git a/docs/zh/10-deployment/index.md b/docs/zh/10-deployment/index.md index 96ac7b176d1125df6cf4763a485c4edba520a48c..4ff1add779c68a7098002dd95dcf28c9dc1acf72 100644 --- a/docs/zh/10-deployment/index.md +++ b/docs/zh/10-deployment/index.md @@ -1,5 +1,7 @@ --- +sidebar_label: 部署集群 title: 部署集群 +description: 部署 TDengine 集群的多种方式 --- TDengine 支持集群,提供水平扩展的能力。如果需要获得更高的处理能力,只需要多增加节点即可。TDengine 采用虚拟节点技术,将一个节点虚拟化为多个虚拟节点,以实现负载均衡。同时,TDengine可以将多个节点上的虚拟节点组成虚拟节点组,通过多副本机制,以保证供系统的高可用。TDengine的集群功能完全开源。 diff --git a/docs/zh/12-taos-sql/01-data-type.md b/docs/zh/12-taos-sql/01-data-type.md index 8ac6ee3b872bd31f616ea0aea3fd4a093abb4402..b8ef050fb79fce5e5d2d65753480a6b156cfbc40 100644 --- a/docs/zh/12-taos-sql/01-data-type.md +++ b/docs/zh/12-taos-sql/01-data-type.md @@ -11,7 +11,7 @@ description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类 - 时间格式为 `YYYY-MM-DD HH:mm:ss.MS`,默认时间分辨率为毫秒。比如:`2017-08-12 18:25:58.128` - 内部函数 now 是客户端的当前时间 - 插入记录时,如果时间戳为 now,插入数据时使用提交这条记录的客户端的当前时间 -- Epoch Time:时间戳也可以是一个长整数,表示从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的毫秒数(相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的微秒数;纳秒精度逻辑类似。) +- Epoch Time:时间戳也可以是一个长整数,表示从 UTC 时间 1970-01-01 00:00:00 开始的毫秒数。相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从 UTC 时间 1970-01-01 00:00:00 开始的微秒数;纳秒精度逻辑类似。 - 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降采样操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n (自然月) 和 y (自然年)。 TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 PRECISION 参数也可以支持微秒和纳秒。 @@ -34,7 +34,7 @@ CREATE DATABASE db_name PRECISION 'ns'; | 7 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] | | 8 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。 | | 9 | SMALLINT | 2 | 短整型, 范围 [-32768, 32767] | -| 10 | SMALLINT UNSIGNED | 2| 无符号短整型,范围 [0, 655357] | +| 10 | SMALLINT UNSIGNED | 2| 无符号短整型,范围 [0, 65535] | | 11 | TINYINT | 1 | 单字节整型,范围 [-128, 127] | | 12 | TINYINT UNSIGNED | 1 | 无符号单字节整型,范围 [0, 255] | | 13 | BOOL | 1 | 布尔型,{true, false} | diff --git a/docs/zh/12-taos-sql/03-table.md b/docs/zh/12-taos-sql/03-table.md index 1e20f73541b7465db76603dc16da8cd1daea0191..9c33c45efcf006344ba5d84a0cbce7bc683f8559 100644 --- a/docs/zh/12-taos-sql/03-table.md +++ b/docs/zh/12-taos-sql/03-table.md @@ -1,5 +1,7 @@ --- title: 表管理 +sidebar_label: 表 +description: 对表的各种管理操作 --- ## 创建表 @@ -8,27 +10,27 @@ title: 表管理 ```sql CREATE TABLE [IF NOT EXISTS] [db_name.]tb_name (create_definition [, create_definitionn] ...) [table_options] - + CREATE TABLE create_subtable_clause - + CREATE TABLE [IF NOT EXISTS] [db_name.]tb_name (create_definition [, create_definitionn] ...) [TAGS (create_definition [, create_definitionn] ...)] [table_options] - + create_subtable_clause: { create_subtable_clause [create_subtable_clause] ... | [IF NOT EXISTS] [db_name.]tb_name USING [db_name.]stb_name [(tag_name [, tag_name] ...)] TAGS (tag_value [, tag_value] ...) } - + create_definition: col_name column_definition - + column_definition: type_name [comment 'string_value'] - + table_options: table_option ... - + table_option: { COMMENT 'string_value' | WATERMARK duration[,duration] @@ -52,12 +54,13 @@ table_option: { 需要注意的是转义字符中的内容必须是可打印字符。 **参数说明** + 1. COMMENT:表注释。可用于超级表、子表和普通表。 -2. WATERMARK:指定窗口的关闭时间,默认值为 5 秒,最小单位毫秒,范围为0到15分钟,多个以逗号分隔。只可用于超级表,且只有当数据库使用了RETENTIONS参数时,才可以使用此表参数。 -3. MAX_DELAY:用于控制推送计算结果的最大延迟,默认值为 interval 的值(但不能超过最大值),最小单位毫秒,范围为1毫秒到15分钟,多个以逗号分隔。注:不建议 MAX_DELAY 设置太小,否则会过于频繁的推送结果,影响存储和查询性能,如无特殊需求,取默认值即可。只可用于超级表,且只有当数据库使用了RETENTIONS参数时,才可以使用此表参数。 -4. ROLLUP:Rollup 指定的聚合函数,提供基于多层级的降采样聚合结果。只可用于超级表。只有当数据库使用了RETENTIONS参数时,才可以使用此表参数。作用于超级表除TS列外的其它所有列,但是只能定义一个聚合函数。 聚合函数支持 avg, sum, min, max, last, first。 -5. SMA:Small Materialized Aggregates,提供基于数据块的自定义预计算功能。预计算类型包括MAX、MIN和SUM。可用于超级表/普通表。 -6. TTL:Time to Live,是用户用来指定表的生命周期的参数。如果在持续的TTL时间内,都没有数据写入该表,则TDengine系统会自动删除该表。这个TTL的时间只是一个大概时间,我们系统不保证到了时间一定会将其删除,而只保证存在这样一个机制。TTL单位是天,默认为0,表示不限制。用户需要注意,TTL优先级高于KEEP,即TTL时间满足删除机制时,即使当前数据的存在时间小于KEEP,此表也会被删除。只可用于子表和普通表。 +2. WATERMARK:指定窗口的关闭时间,默认值为 5 秒,最小单位毫秒,范围为 0 到 15 分钟,多个以逗号分隔。只可用于超级表,且只有当数据库使用了 RETENTIONS 参数时,才可以使用此表参数。 +3. MAX_DELAY:用于控制推送计算结果的最大延迟,默认值为 interval 的值(但不能超过最大值),最小单位毫秒,范围为 1 毫秒到 15 分钟,多个以逗号分隔。注:不建议 MAX_DELAY 设置太小,否则会过于频繁的推送结果,影响存储和查询性能,如无特殊需求,取默认值即可。只可用于超级表,且只有当数据库使用了 RETENTIONS 参数时,才可以使用此表参数。 +4. ROLLUP:Rollup 指定的聚合函数,提供基于多层级的降采样聚合结果。只可用于超级表。只有当数据库使用了 RETENTIONS 参数时,才可以使用此表参数。作用于超级表除 TS 列外的其它所有列,但是只能定义一个聚合函数。 聚合函数支持 avg, sum, min, max, last, first。 +5. SMA:Small Materialized Aggregates,提供基于数据块的自定义预计算功能。预计算类型包括 MAX、MIN 和 SUM。可用于超级表/普通表。 +6. TTL:Time to Live,是用户用来指定表的生命周期的参数。如果创建表时指定了这个参数,当该表的存在时间超过 TTL 指定的时间后,TDengine 自动删除该表。这个 TTL 的时间只是一个大概时间,系统不保证到了时间一定会将其删除,而只保证存在这样一个机制且最终一定会删除。TTL 单位是天,默认为 0,表示不限制,到期时间为表创建时间加上 TTL 时间。 ## 创建子表 @@ -87,7 +90,7 @@ CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF ```sql ALTER TABLE [db_name.]tb_name alter_table_clause - + alter_table_clause: { alter_table_options | ADD COLUMN col_name column_type @@ -95,10 +98,10 @@ alter_table_clause: { | MODIFY COLUMN col_name column_type | RENAME COLUMN old_col_name new_col_name } - + alter_table_options: alter_table_option ... - + alter_table_option: { TTL value | COMMENT 'string_value' @@ -108,9 +111,10 @@ alter_table_option: { **使用说明** 对普通表可以进行如下修改操作 + 1. ADD COLUMN:添加列。 2. DROP COLUMN:删除列。 -3. ODIFY COLUMN:修改列定义,如果数据列的类型是可变长类型,那么可以使用此指令修改其宽度,只能改大,不能改小。 +3. MODIFY COLUMN:修改列定义,如果数据列的类型是可变长类型,那么可以使用此指令修改其宽度,只能改大,不能改小。 4. RENAME COLUMN:修改列名称。 ### 增加列 @@ -141,15 +145,15 @@ ALTER TABLE tb_name RENAME COLUMN old_col_name new_col_name ```sql ALTER TABLE [db_name.]tb_name alter_table_clause - + alter_table_clause: { alter_table_options | SET TAG tag_name = new_tag_value } - + alter_table_options: alter_table_option ... - + alter_table_option: { TTL value | COMMENT 'string_value' @@ -157,6 +161,7 @@ alter_table_option: { ``` **使用说明** + 1. 对子表的列和标签的修改,除了更改标签值以外,都要通过超级表才能进行。 ### 修改子表标签值 @@ -167,7 +172,7 @@ ALTER TABLE tb_name SET TAG tag_name=new_tag_value; ## 删除表 -可以在一条SQL语句中删除一个或多个普通表或子表。 +可以在一条 SQL 语句中删除一个或多个普通表或子表。 ```sql DROP TABLE [IF EXISTS] [db_name.]tb_name [, [IF EXISTS] [db_name.]tb_name] ... @@ -177,7 +182,7 @@ DROP TABLE [IF EXISTS] [db_name.]tb_name [, [IF EXISTS] [db_name.]tb_name] ... ### 显示所有表 -如下SQL语句可以列出当前数据库中的所有表名。 +如下 SQL 语句可以列出当前数据库中的所有表名。 ```sql SHOW TABLES [LIKE tb_name_wildchar]; @@ -195,4 +200,4 @@ SHOW CREATE TABLE tb_name; ``` DESCRIBE [db_name.]tb_name; -``` \ No newline at end of file +``` diff --git a/docs/zh/12-taos-sql/04-stable.md b/docs/zh/12-taos-sql/04-stable.md index 59d9657694340ae263fb23b8c2b17ede8984426d..450ff07fd8eb636b3ee185e5594d77d645195c56 100644 --- a/docs/zh/12-taos-sql/04-stable.md +++ b/docs/zh/12-taos-sql/04-stable.md @@ -1,6 +1,7 @@ --- sidebar_label: 超级表管理 title: 超级表 STable 管理 +description: 对超级表的各种管理操作 --- ## 创建超级表 diff --git a/docs/zh/12-taos-sql/05-insert.md b/docs/zh/12-taos-sql/05-insert.md index c91e70c481055b804d88c8911fb454a3dd15b799..59af9c55ed076fb23814a24a5d2429e51d5fc051 100644 --- a/docs/zh/12-taos-sql/05-insert.md +++ b/docs/zh/12-taos-sql/05-insert.md @@ -1,6 +1,7 @@ --- sidebar_label: 数据写入 title: 数据写入 +description: 写入数据的详细语法 --- ## 写入语法 diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md index 8aa6c43747dffba640ff8715643f70d89d827f41..d8ff3f04ed261ade9f8253d57a33e9c56adfefd6 100644 --- a/docs/zh/12-taos-sql/06-select.md +++ b/docs/zh/12-taos-sql/06-select.md @@ -1,6 +1,7 @@ --- sidebar_label: 数据查询 title: 数据查询 +description: 查询数据的详细语法 --- ## 查询语法 @@ -103,7 +104,7 @@ SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts; 在超级表和子表的查询中可以指定 _标签列_,且标签列的值会与普通列的数据一起返回。 ```sql -ELECT location, groupid, current FROM d1001 LIMIT 2; +SELECT location, groupid, current FROM d1001 LIMIT 2; ``` ### 结果去重 @@ -354,19 +355,15 @@ SELECT ... FROM (SELECT ... FROM ...) ...; :::info -- 目前仅支持一层嵌套,也即不能在子查询中再嵌入子查询。 -- 内层查询的返回结果将作为“虚拟表”供外层查询使用,此虚拟表可以使用 AS 语法做重命名,以便于外层查询中方便引用。 -- 目前不能在“连续查询”功能中使用子查询。 +- 内层查询的返回结果将作为“虚拟表”供外层查询使用,此虚拟表建议起别名,以便于外层查询中方便引用。 - 在内层和外层查询中,都支持普通的表间/超级表间 JOIN。内层查询的计算结果也可以再参与数据子表的 JOIN 操作。 -- 目前内层查询、外层查询均不支持 UNION 操作。 - 内层查询支持的功能特性与非嵌套的查询语句能力是一致的。 - 内层查询的 ORDER BY 子句一般没有意义,建议避免这样的写法以免无谓的资源消耗。 - 与非嵌套的查询语句相比,外层查询所能支持的功能特性存在如下限制: - 计算函数部分: - - 如果内层查询的结果数据未提供时间戳,那么计算过程依赖时间戳的函数在外层会无法正常工作。例如:TOP, BOTTOM, FIRST, LAST, DIFF。 - - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:STDDEV, PERCENTILE。 - - 外层查询中不支持 IN 算子,但在内层中可以使用。 - - 外层查询不支持 GROUP BY。 + - 如果内层查询的结果数据未提供时间戳,那么计算过程隐式依赖时间戳的函数在外层会无法正常工作。例如:INTERP, DERIVATIVE, IRATE, LAST_ROW, FIRST, LAST, TWA, STATEDURATION, TAIL, UNIQUE。 + - 如果内层查询的结果数据不是有效的时间序列,那么计算过程依赖数据为时间序列的函数在外层会无法正常工作。例如:LEASTSQUARES, ELAPSED, INTERP, DERIVATIVE, IRATE, TWA, DIFF, STATECOUNT, STATEDURATION, CSUM, MAVG, TAIL, UNIQUE。 + - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:PERCENTILE。 ::: diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index bbf6b52eb985bef2decebcacd27d777bd1999b1f..9f999181c40c4f51e1499f9189fe63bacf2222df 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -1,6 +1,7 @@ --- sidebar_label: 函数 title: 函数 +description: TDengine 支持的函数列表 toc_max_heading_level: 4 --- @@ -846,7 +847,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]; ### INTERP ```sql -SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]; +SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] RANGE(timestamp1,timestamp2) EVERY(interval) FILL({ VALUE | PREV | NULL | LINEAR | NEXT}); ``` **功能说明**:返回指定时间截面指定列的记录值或插值。 @@ -855,17 +856,16 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ **适用数据类型**:数值类型。 -**适用于**:表、超级表。 +**适用于**:表和超级表。 **使用说明** - INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。 - INTERP 的输入数据为指定列的数据,可以通过条件语句(where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。 -- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1<=timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。如果没有指定 RANGE,那么满足过滤条件的输入数据中第一条记录的 timestamp 即为 timestamp1,最后一条记录的 timestamp 即为 timestamp2,同样也满足 timestamp1 <= timestamp2。 +- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1<=timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。 - INTERP 根据 EVERY 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(EVERY 值)进行插值。如果没有指定 EVERY,则默认窗口大小为无穷大,即从 timestamp1 开始只有一个窗口。 -- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值,如果没有 FILL 字段则默认不插值,即输出为原始记录值或不输出(原始记录不存在)。 -- INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 group by tbname 一起使用,当作用嵌套查询外层时内层子查询不能含 GROUP BY 信息。 -- INTERP 的插值结果不受 ORDER BY timestamp 的影响,ORDER BY timestamp 只影响输出结果的排序。 +- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。 +- INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 partition by tbname 一起使用。 ### LAST @@ -1167,7 +1167,7 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W **参数范围**: -- oper : "LT" (小于)、"GT"(大于)、"LE"(小于等于)、"GE"(大于等于)、"NE"(不等于)、"EQ"(等于),不区分大小写。 +- oper : `'LT'` (小于)、`'GT'`(大于)、`'LE'`(小于等于)、`'GE'`(大于等于)、`'NE'`(不等于)、`'EQ'`(等于),不区分大小写,但需要用`''`包括。 - val : 数值型 - unit : 时间长度的单位,可取值时间单位: 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。如果省略,默认为当前数据库精度。 diff --git a/docs/zh/12-taos-sql/12-distinguished.md b/docs/zh/12-taos-sql/12-distinguished.md index 2dad49ece942d0530c12afa145c2e11682c23fe3..016c1929fe5bd016f8327dc9fae587fe015786b8 100644 --- a/docs/zh/12-taos-sql/12-distinguished.md +++ b/docs/zh/12-taos-sql/12-distinguished.md @@ -1,15 +1,16 @@ --- sidebar_label: 时序数据特色查询 title: 时序数据特色查询 +description: TDengine 提供的时序数据特有的查询功能 --- TDengine 是专为时序数据而研发的大数据平台,存储和计算都针对时序数据的特定进行了量身定制,在支持标准 SQL 的基础之上,还提供了一系列贴合时序业务场景的特色查询语法,极大的方便时序场景的应用开发。 -TDengine 提供的特色查询包括标签切分查询和窗口切分查询。 +TDengine 提供的特色查询包括数据切分查询和窗口切分查询。 -## 标签切分查询 +## 数据切分查询 -超级表查询中,当需要针对标签进行数据切分然后在切分出的数据空间内再进行一系列的计算时使用标签切分子句,标签切分的语句如下: +当需要按一定的维度对数据进行切分然后在切分出的数据空间内再进行一系列的计算时使用数据切分子句,数据切分语句的语法如下: ```sql PARTITION BY part_list @@ -17,22 +18,23 @@ PARTITION BY part_list part_list 可以是任意的标量表达式,包括列、常量、标量函数和它们的组合。 -当 PARTITION BY 和标签一起使用时,TDengine 按如下方式处理标签切分子句: +TDengine 按如下方式处理数据切分子句: -- 标签切分子句位于 WHERE 子句之后,且不能和 JOIN 子句一起使用。 -- 标签切分子句将超级表数据按指定的标签组合进行切分,每个切分的分片进行指定的计算。计算由之后的子句定义(窗口子句、GROUP BY 子句或 SELECT 子句)。 -- 标签切分子句可以和窗口切分子句(或 GROUP BY 子句)一起使用,此时后面的子句作用在每个切分的分片上。例如,将数据按标签 location 进行分组,并对每个组按 10 分钟进行降采样,取其最大值。 +- 数据切分子句位于 WHERE 子句之后。 +- 数据切分子句将表数据按指定的维度进行切分,每个切分的分片进行指定的计算。计算由之后的子句定义(窗口子句、GROUP BY 子句或 SELECT 子句)。 +- 数据切分子句可以和窗口切分子句(或 GROUP BY 子句)一起使用,此时后面的子句作用在每个切分的分片上。例如,将数据按标签 location 进行分组,并对每个组按 10 分钟进行降采样,取其最大值。 ```sql select max(current) from meters partition by location interval(10m) ``` +数据切分子句最常见的用法就是在超级表查询中,按标签将子表数据进行切分,然后分别进行计算。特别是 PARTITION BY TBNAME 用法,它将每个子表的数据独立出来,形成一条条独立的时间序列,极大的方便了各种时序场景的统计分析。 ## 窗口切分查询 TDengine 支持按时间段窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这种场景下可以使用窗口子句来获得需要的查询结果。窗口子句用于针对查询的数据集合按照窗口切分成为查询子集并进行聚合,窗口包含时间窗口(time window)、状态窗口(status window)、会话窗口(session window)三种窗口。其中时间窗口又可划分为滑动时间窗口和翻转时间窗口。窗口切分查询语法如下: ```sql -SELECT function_list FROM tb_name +SELECT select_list FROM tb_name [WHERE where_condition] [SESSION(ts_col, tol_val)] [STATE_WINDOW(col)] @@ -42,19 +44,15 @@ SELECT function_list FROM tb_name 在上述语法中的具体限制如下 -### 窗口切分查询中使用函数的限制 - -- 在聚合查询中,function_list 位置允许使用聚合和选择函数,并要求每个函数仅输出单个结果(例如:COUNT、AVG、SUM、STDDEV、LEASTSQUARES、PERCENTILE、MIN、MAX、FIRST、LAST),而不能使用具有多行输出结果的函数(例如:DIFF 以及四则运算)。 -- 此外 LAST_ROW 查询也不能与窗口聚合同时出现。 -- 标量函数(如:CEIL/FLOOR 等)也不能使用在窗口聚合查询中。 - ### 窗口子句的规则 -- 窗口子句位于标签切分子句之后,GROUP BY 子句之前,且不可以和 GROUP BY 子句一起使用。 +- 窗口子句位于数据切分子句之后,GROUP BY 子句之前,且不可以和 GROUP BY 子句一起使用。 - 窗口子句将数据按窗口进行切分,对每个窗口进行 SELECT 列表中的表达式的计算,SELECT 列表中的表达式只能包含: - 常量。 - - 聚集函数。 + - _wstart伪列、_wend伪列和_wduration伪列。 + - 聚集函数(包括选择函数和可以由参数确定输出行数的时序特有函数)。 - 包含上面表达式的表达式。 + - 且至少包含一个聚集函数。 - 窗口子句不可以和 GROUP BY 子句一起使用。 - WHERE 语句可以指定查询的起止时间和其他过滤条件。 @@ -73,7 +71,7 @@ FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填 1. 使用 FILL 语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过 1 千万条具有插值的结果。 2. 在时间维度聚合中,返回的结果中时间序列严格单调递增。 -3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用 GROUP BY 语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了 GROUP BY 语句分组,则返回结果中每个 GROUP 内不按照时间序列严格单调递增。 +3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用 PARTITION BY 语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了 PARTITION BY 语句分组,则返回结果中每个 PARTITION 内不按照时间序列严格单调递增。 ::: @@ -105,7 +103,7 @@ SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m); ### 状态窗口 -使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。如下图所示,根据状态量确定的状态窗口分别是[2019-04-28 14:22:07,2019-04-28 14:22:10]和[2019-04-28 14:22:11,2019-04-28 14:22:12]两个。(状态窗口暂不支持对超级表使用) +使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。如下图所示,根据状态量确定的状态窗口分别是[2019-04-28 14:22:07,2019-04-28 14:22:10]和[2019-04-28 14:22:11,2019-04-28 14:22:12]两个。 ![TDengine Database 时间窗口示意图](./timewindow-3.webp) @@ -121,7 +119,7 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status); ![TDengine Database 时间窗口示意图](./timewindow-2.webp) -在 tol_value 时间间隔范围内的结果都认为归属于同一个窗口,如果连续的两条记录的时间超过 tol_val,则自动开启下一个窗口。(会话窗口暂不支持对超级表使用) +在 tol_value 时间间隔范围内的结果都认为归属于同一个窗口,如果连续的两条记录的时间超过 tol_val,则自动开启下一个窗口。 ``` diff --git a/docs/zh/12-taos-sql/13-tmq.md b/docs/zh/12-taos-sql/13-tmq.md index 4d9c475a3829456916175d8a0518c47d67bc18ee..571300ad8cbfb031e38f330c0773ec6ee6f11e32 100644 --- a/docs/zh/12-taos-sql/13-tmq.md +++ b/docs/zh/12-taos-sql/13-tmq.md @@ -1,6 +1,7 @@ --- -sidebar_label: 消息队列 -title: 消息队列 +sidebar_label: 数据订阅 +title: 数据订阅 +description: TDengine 消息队列提供的数据订阅功能 --- TDengine 3.0.0.0 开始对消息队列做了大幅的优化和增强以简化用户的解决方案。 @@ -8,24 +9,17 @@ TDengine 3.0.0.0 开始对消息队列做了大幅的优化和增强以简化用 ## 创建订阅主题 ```sql -CREATE TOPIC [IF NOT EXISTS] topic_name AS {subquery | DATABASE db_name | STABLE stb_name }; +CREATE TOPIC [IF NOT EXISTS] topic_name AS subquery; ``` -订阅主题包括三种:列订阅、超级表订阅和数据库订阅。 -**列订阅是**用 subquery 描述,支持过滤和标量函数和 UDF 标量函数,不支持 JOIN、GROUP BY、窗口切分子句、聚合函数和 UDF 聚合函数。列订阅规则如下: +TOPIC 支持过滤和标量函数和 UDF 标量函数,不支持 JOIN、GROUP BY、窗口切分子句、聚合函数和 UDF 聚合函数。列订阅规则如下: 1. TOPIC 一旦创建则返回结果的字段确定 2. 被订阅或用于计算的列不可被删除、修改 3. 列可以新增,但新增的列不出现在订阅结果字段中 4. 对于 select \*,则订阅展开为创建时所有的列(子表、普通表为数据列,超级表为数据列加标签列) -**超级表订阅和数据库订阅**规则如下: - -1. 被订阅主体的 schema 变更不受限 -2. 返回消息中 schema 是块级别的,每块的 schema 可能不一样 -3. 列变更后写入的数据若未落盘,将以写入时的 schema 返回 -4. 列变更后写入的数据若未已落盘,将以落盘时的 schema 返回 ## 删除订阅主题 diff --git a/docs/zh/12-taos-sql/14-stream.md b/docs/zh/12-taos-sql/14-stream.md index 1a056e278c5e3620cf1a31b9e8e358f9b05929f8..70b062a6ca28549347f78f8eea21c54b1e3bcb81 100644 --- a/docs/zh/12-taos-sql/14-stream.md +++ b/docs/zh/12-taos-sql/14-stream.md @@ -1,11 +1,9 @@ --- sidebar_label: 流式计算 title: 流式计算 +description: 流式计算的相关 SQL 的详细语法 --- -在时序数据的处理中,经常要对原始数据进行清洗、预处理,再使用时序数据库进行长久的储存。用户通常需要在时序数据库之外再搭建 Kafka、Flink、Spark 等流计算处理引擎,增加了用户的开发成本和维护成本。 - -使用 TDengine 3.0 的流式计算引擎能够最大限度的减少对这些额外中间件的依赖,真正将数据的写入、预处理、长期存储、复杂分析、实时计算、实时报警触发等功能融为一体,并且,所有这些任务只需要使用 SQL 完成,极大降低了用户的学习成本、使用成本。 ## 创建流式计算 @@ -21,7 +19,7 @@ stream_options: { 其中 subquery 是 select 普通查询语法的子集: ```sql -subquery: SELECT [DISTINCT] select_list +subquery: SELECT select_list from_clause [WHERE condition] [PARTITION BY tag_list] @@ -40,6 +38,8 @@ window_clause: { 其中,SESSION 是会话窗口,tol_val 是时间间隔的最大范围。在 tol_val 时间间隔范围内的数据都属于同一个窗口,如果连续的两条数据的时间超过 tol_val,则自动开启下一个窗口。 +窗口的定义与时序数据特色查询中的定义完全相同,详见 [TDengine 特色查询](../distinguished) + 例如,如下语句创建流式计算,同时自动创建名为 avg_vol 的超级表,此流计算以一分钟为时间窗口、30 秒为前向增量统计这些电表的平均电压,并将来自 meters 表的数据的计算结果写入 avg_vol 表,不同 partition 的数据会分别创建子表并写入不同子表。 ```sql @@ -47,10 +47,18 @@ CREATE STREAM avg_vol_s INTO avg_vol AS SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s); ``` +## 流式计算的 partition + +可以使用 PARTITION BY TBNAME 或 PARTITION BY tag,对一个流进行多分区的计算,每个分区的时间线与时间窗口是独立的,会各自聚合,并写入到目的表中的不同子表。 + +不带 PARTITION BY 选项时,所有的数据将写入到一张子表。 + +流式计算创建的超级表有唯一的 tag 列 groupId,每个 partition 会被分配唯一 groupId。与 schemaless 写入一致,我们通过 MD5 计算子表名,并自动创建它。 + ## 删除流式计算 ```sql -DROP STREAM [IF NOT EXISTS] stream_name +DROP STREAM [IF NOT EXISTS] stream_name; ``` 仅删除流式计算任务,由流式计算写入的数据不会被删除。 @@ -61,6 +69,12 @@ DROP STREAM [IF NOT EXISTS] stream_name SHOW STREAMS; ``` +若要展示更详细的信息,可以使用: + +```sql +SELECT * from performance_schema.`perf_streams`; +``` + ## 流式计算的触发模式 在创建流时,可以通过 TRIGGER 指令指定流式计算的触发模式。 @@ -69,7 +83,7 @@ SHOW STREAMS; 1. AT_ONCE:写入立即触发 -2. WINDOW_CLOSE:窗口关闭时触发(窗口关闭由事件时间决定,可配合 watermark 使用,详见《流式计算的乱序数据容忍策略》) +2. WINDOW_CLOSE:窗口关闭时触发(窗口关闭由事件时间决定,可配合 watermark 使用) 3. MAX_DELAY time:若窗口关闭,则触发计算。若窗口未关闭,且未关闭时长超过 max delay 指定的时间,则触发计算。 @@ -79,21 +93,44 @@ SHOW STREAMS; MAX_DELAY 模式在窗口关闭时会立即触发计算。此外,当数据写入后,计算触发的时间超过 max delay 指定的时间,则立即触发计算 -## 流式计算的乱序数据容忍策略 +## 流式计算的窗口关闭 + +流式计算以事件时间(插入记录中的时间戳主键)为基准计算窗口关闭,而非以 TDengine 服务器的时间,以事件时间为基准,可以避免客户端与服务器时间不一致带来的问题,能够解决乱序数据写入等等问题。流式计算还提供了 watermark 来定义容忍的乱序程度。 -在创建流时,可以在 stream_option 中指定 watermark。 +在创建流时,可以在 stream_option 中指定 watermark,它定义了数据乱序的容忍上界。 流式计算通过 watermark 来度量对乱序数据的容忍程度,watermark 默认为 0。 T = 最新事件时间 - watermark -每批到来的数据都会以上述公式更新窗口关闭时间,并将窗口结束时间 < T 的所有打开的窗口关闭,若触发模式为 WINDOW_CLOSE 或 MAX_DELAY,则推送窗口聚合结果。 +每次写入的数据都会以上述公式更新窗口关闭时间,并将窗口结束时间 < T 的所有打开的窗口关闭,若触发模式为 WINDOW_CLOSE 或 MAX_DELAY,则推送窗口聚合结果。 + + +![TDengine 流式计算窗口关闭示意图](./watermark.webp) + + +图中,纵轴表示不同时刻,对于不同时刻,我们画出其对应的 TDengine 收到的数据,即为横轴。 + +横轴上的数据点表示已经收到的数据,其中蓝色的点表示事件时间(即数据中的时间戳主键)最后的数据,该数据点减去定义的 watermark 时间,得到乱序容忍的上界 T。 + +所有结束时间小于 T 的窗口都将被关闭(图中以灰色方框标记)。 + +T2 时刻,乱序数据(黄色的点)到达 TDengine,由于有 watermark 的存在,这些数据进入的窗口并未被关闭,因此可以被正确处理。 + +T3 时刻,最新事件到达,T 向后推移超过了第二个窗口关闭的时间,该窗口被关闭,乱序数据被正确处理。 + +在 window_close 或 max_delay 模式下,窗口关闭直接影响推送结果。在 at_once 模式下,窗口关闭只与内存占用有关。 + + +## 流式计算的过期数据处理策略 + +对于已关闭的窗口,再次落入该窗口中的数据被标记为过期数据. + +TDengine 对于过期数据提供两种处理方式,由 IGNORE EXPIRED 选项指定: -流式计算的过期数据处理策略 -对于已关闭的窗口,再次落入该窗口中的数据被标记为过期数据,对于过期数据,流式计算提供两种处理方式: +1. 重新计算,即 IGNORE EXPIRED 0:默认配置,从 TSDB 中重新查找对应窗口的所有数据并重新计算得到最新结果 -1. 直接丢弃:这是常见流式计算引擎提供的默认(甚至是唯一)计算模式 +2. 直接丢弃, 即 IGNORE EXPIRED 1:忽略过期数据 -2. 重新计算:从 TSDB 中重新查找对应窗口的所有数据并重新计算得到最新结果 无论在哪种模式下,watermark 都应该被妥善设置,来得到正确结果(直接丢弃模式)或避免频繁触发重算带来的性能开销(重新计算模式)。 diff --git a/docs/zh/12-taos-sql/16-operators.md b/docs/zh/12-taos-sql/16-operators.md index 22b78455fb35e9ebe5978b30505819e1a2b678c8..48e9991799abf99ca868fc30e34f0435054afa0b 100644 --- a/docs/zh/12-taos-sql/16-operators.md +++ b/docs/zh/12-taos-sql/16-operators.md @@ -1,6 +1,7 @@ --- sidebar_label: 运算符 title: 运算符 +description: TDengine 支持的所有运算符 --- ## 算术运算符 diff --git a/docs/zh/12-taos-sql/17-json.md b/docs/zh/12-taos-sql/17-json.md index 4a4a8cca732ac433ba5ada1ec3805ebfa663edb3..4cbd8eef364b1ea4e4285a34bb419a8ab3c7fc1d 100644 --- a/docs/zh/12-taos-sql/17-json.md +++ b/docs/zh/12-taos-sql/17-json.md @@ -1,6 +1,7 @@ --- sidebar_label: JSON 类型使用说明 title: JSON 类型使用说明 +description: 对 JSON 类型如何使用的详细说明 --- diff --git a/docs/zh/12-taos-sql/18-escape.md b/docs/zh/12-taos-sql/18-escape.md index 756e5c81591e7414827fdc65e228cfafc96214ad..7e543743a30aeaa125375b14ad8baf49b634d248 100644 --- a/docs/zh/12-taos-sql/18-escape.md +++ b/docs/zh/12-taos-sql/18-escape.md @@ -1,5 +1,7 @@ --- title: 转义字符说明 +sidebar_label: 转义字符 +description: TDengine 中使用转义字符的详细规则 --- ## 转义字符表 @@ -15,9 +17,6 @@ title: 转义字符说明 | `\%` | % 规则见下 | | `\_` | \_ 规则见下 | -:::note -转义符的功能从 2.4.0.4 版本开始 - ::: ## 转义字符使用规则 diff --git a/docs/zh/12-taos-sql/19-limit.md b/docs/zh/12-taos-sql/19-limit.md index ff552fc9771f5b428554acc62e9aeac03a305ecc..0dbe00f80063bbc62cae38c540e3e7b6627d53d3 100644 --- a/docs/zh/12-taos-sql/19-limit.md +++ b/docs/zh/12-taos-sql/19-limit.md @@ -1,6 +1,7 @@ --- sidebar_label: 命名与边界限制 title: 命名与边界限制 +description: 合法字符集和命名中的限制规则 --- ## 名称命名规则 @@ -30,7 +31,7 @@ title: 命名与边界限制 - 最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。 - 标签名最大长度为 64 - 最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16KB -- SQL 语句最大长度 1048576 个字符,也可通过客户端配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 +- SQL 语句最大长度 1048576 个字符 - SELECT 语句的查询结果,最多允许返回 4096 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错 - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 - 数据库的副本数只能设置为 1 或 3 diff --git a/docs/zh/12-taos-sql/20-keywords.md b/docs/zh/12-taos-sql/20-keywords.md index cac29d7863ff77a6ec15bb9bddedd006317b719c..047c6b08c9646927fc8ec16a2fd390569e4404fb 100644 --- a/docs/zh/12-taos-sql/20-keywords.md +++ b/docs/zh/12-taos-sql/20-keywords.md @@ -1,6 +1,7 @@ --- sidebar_label: 保留关键字 title: TDengine 保留关键字 +description: TDengine 保留关键字的详细列表 --- ## 保留关键字 diff --git a/docs/zh/12-taos-sql/21-node.md b/docs/zh/12-taos-sql/21-node.md index 4816daf42042c0607aebf37c8b57961e5b1927fe..d47dc8198f41e7ee6e90624b0928c6bd215bb26d 100644 --- a/docs/zh/12-taos-sql/21-node.md +++ b/docs/zh/12-taos-sql/21-node.md @@ -1,6 +1,7 @@ --- sidebar_label: 集群管理 title: 集群管理 +description: 管理集群的 SQL 命令的详细解析 --- 组成 TDengine 集群的物理实体是 dnode (data node 的缩写),它是一个运行在操作系统之上的进程。在 dnode 中可以建立负责时序数据存储的 vnode (virtual node),在多节点集群环境下当某个数据库的 replica 为 3 时,该数据库中的每个 vgroup 由 3 个 vnode 组成;当数据库的 replica 为 1 时,该数据库中的每个 vgroup 由 1 个 vnode 组成。如果要想配置某个数据库为多副本,则集群中的 dnode 数量至少为 3。在 dnode 还可以创建 mnode (management node),单个集群中最多可以创建三个 mnode。在 TDengine 3.0.0.0 中为了支持存算分离,引入了一种新的逻辑节点 qnode (query node),qnode 和 vnode 既可以共存在一个 dnode 中,也可以完全分离在不同的 dnode 上。 diff --git a/docs/zh/12-taos-sql/22-meta.md b/docs/zh/12-taos-sql/22-meta.md index 8139b2fc55d420edfb766aab6ed06477fbd3621f..3ae444e8fe692fee4f2331f1a01fb67899fe7930 100644 --- a/docs/zh/12-taos-sql/22-meta.md +++ b/docs/zh/12-taos-sql/22-meta.md @@ -1,6 +1,7 @@ --- sidebar_label: 元数据 title: 存储元数据的 Information_Schema 数据库 +description: Information_Schema 数据库中存储了系统中所有的元数据信息 --- TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数据库元数据、数据库系统信息和状态的访问,例如数据库或表的名称,当前执行的 SQL 语句等。该数据库存储有关 TDengine 维护的所有其他数据库的信息。它包含多个只读表。实际上,这些表都是视图,而不是基表,因此没有与它们关联的文件。所以对这些表只能查询,不能进行 INSERT 等写入操作。`INFORMATION_SCHEMA` 数据库旨在以一种更一致的方式来提供对 TDengine 支持的各种 SHOW 语句(如 SHOW TABLES、SHOW DATABASES)所提供的信息的访问。与 SHOW 语句相比,使用 SELECT ... FROM INFORMATION_SCHEMA.tablename 具有以下优点: @@ -245,3 +246,35 @@ Note: 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们 | 1 | dnode_id | INT | dnode 的 ID | | 2 | name | BINARY(32) | 配置项名称 | | 3 | value | BINARY(64) | 该配置项的值 | + +## INS_TOPICS + +| # | **列名** | **数据类型** | **说明** | +| --- | :---------: | ------------ | ------------------------------ | +| 1 | topic_name | BINARY(192) | topic 名称 | +| 2 | db_name | BINARY(64) | topic 相关的 DB | +| 3 | create_time | TIMESTAMP | topic 的 创建时间 | +| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 | + +## INS_SUBSCRIPTIONS + +| # | **列名** | **数据类型** | **说明** | +| --- | :------------: | ------------ | ------------------------ | +| 1 | topic_name | BINARY(204) | 被订阅的 topic | +| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 | +| 3 | vgroup_id | INT | 消费者被分配的 vgroup id | +| 4 | consumer_id | BIGINT | 消费者的唯一 id | + +## INS_STREAMS + +| # | **列名** | **数据类型** | **说明** | +| --- | :----------: | ------------ | --------------------------------------- | +| 1 | stream_name | BINARY(64) | 流计算名称 | +| 2 | create_time | TIMESTAMP | 创建时间 | +| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 | +| 4 | status | BIANRY(20) | 流当前状态 | +| 5 | source_db | BINARY(64) | 源数据库 | +| 6 | target_db | BIANRY(64) | 目的数据库 | +| 7 | target_table | BINARY(192) | 流计算写入的目标表 | +| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算 | +| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算 | diff --git a/docs/zh/12-taos-sql/23-perf.md b/docs/zh/12-taos-sql/23-perf.md index ac852ee1506ce8da24c036c61ce96fa4eecaf1cb..808d9ae31a35a215a07b57e29073e998beb1e25d 100644 --- a/docs/zh/12-taos-sql/23-perf.md +++ b/docs/zh/12-taos-sql/23-perf.md @@ -1,6 +1,7 @@ --- sidebar_label: 统计数据 title: 存储统计数据的 Performance_Schema 数据库 +description: Performance_Schema 数据库中存储了系统中的各种统计信息 --- TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其中存储了与性能有关的统计数据。本节详细介绍其中的表和表结构。 @@ -61,15 +62,6 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其 | 12 | sub_status | BINARY(1000) | 子查询状态 | | 13 | sql | BINARY(1024) | SQL 语句 | -## PERF_TOPICS - -| # | **列名** | **数据类型** | **说明** | -| --- | :---------: | ------------ | ------------------------------ | -| 1 | topic_name | BINARY(192) | topic 名称 | -| 2 | db_name | BINARY(64) | topic 相关的 DB | -| 3 | create_time | TIMESTAMP | topic 的 创建时间 | -| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 | - ## PERF_CONSUMERS | # | **列名** | **数据类型** | **说明** | @@ -83,15 +75,6 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其 | 7 | subscribe_time | TIMESTAMP | 上一次发起订阅的时间 | | 8 | rebalance_time | TIMESTAMP | 上一次触发 rebalance 的时间 | -## PERF_SUBSCRIPTIONS - -| # | **列名** | **数据类型** | **说明** | -| --- | :------------: | ------------ | ------------------------ | -| 1 | topic_name | BINARY(204) | 被订阅的 topic | -| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 | -| 3 | vgroup_id | INT | 消费者被分配的 vgroup id | -| 4 | consumer_id | BIGINT | 消费者的唯一 id | - ## PERF_TRANS | # | **列名** | **数据类型** | **说明** | @@ -113,17 +96,3 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其 | 2 | create_time | TIMESTAMP | sma 创建时间 | | 3 | stable_name | BINARY(192) | sma 所属的超级表名称 | | 4 | vgroup_id | INT | sma 专属的 vgroup 名称 | - -## PERF_STREAMS - -| # | **列名** | **数据类型** | **说明** | -| --- | :----------: | ------------ | --------------------------------------- | -| 1 | stream_name | BINARY(64) | 流计算名称 | -| 2 | create_time | TIMESTAMP | 创建时间 | -| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 | -| 4 | status | BIANRY(20) | 流当前状态 | -| 5 | source_db | BINARY(64) | 源数据库 | -| 6 | target_db | BIANRY(64) | 目的数据库 | -| 7 | target_table | BINARY(192) | 流计算写入的目标表 | -| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算 | -| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算 | diff --git a/docs/zh/12-taos-sql/24-show.md b/docs/zh/12-taos-sql/24-show.md index 781f94324c78e7975abde33803cffdb914da020c..b4aafdaa0af644e05e47106b76e0c7ab074a61b8 100644 --- a/docs/zh/12-taos-sql/24-show.md +++ b/docs/zh/12-taos-sql/24-show.md @@ -1,9 +1,10 @@ --- sidebar_label: SHOW 命令 title: 使用 SHOW 命令查看系统元数据 +description: SHOW 命令的完整列表 --- -除了使用 `select` 语句查询 `INFORMATION_SCHEMA` 数据库中的表获得系统中的各种元数据、系统信息和状态之外,也可以用 `SHOW` 命令来实现同样的目的。 +SHOW 命令可以用来获取简要的系统信息。若想获取系统中详细的各种元数据、系统信息和状态,请使用 select 语句查询 INFORMATION_SCHEMA 数据库中的表。 ## SHOW ACCOUNTS @@ -194,7 +195,7 @@ SHOW STREAMS; SHOW SUBSCRIPTIONS; ``` -显示当前数据库下的所有的订阅关系 +显示当前系统内所有的订阅关系 ## SHOW TABLES diff --git a/docs/zh/12-taos-sql/25-grant.md b/docs/zh/12-taos-sql/25-grant.md index c41a3fcfc9ee42e56e48082da5b6420073d92cdf..7fb944710125de6fe4d6efcedbb0677b33e1fd0f 100644 --- a/docs/zh/12-taos-sql/25-grant.md +++ b/docs/zh/12-taos-sql/25-grant.md @@ -1,6 +1,7 @@ --- sidebar_label: 权限管理 title: 权限管理 +description: 企业版中才具有的权限管理功能 --- 本节讲述如何在 TDengine 中进行权限管理的相关操作。 @@ -8,14 +9,51 @@ title: 权限管理 ## 创建用户 ```sql -CREATE USER use_name PASS 'password'; +CREATE USER use_name PASS 'password' [SYSINFO {1|0}]; ``` 创建用户。 -use_name最长为23字节。 +use_name 最长为 23 字节。 -password最长为128字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。 +password 最长为 128 字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。 + +SYSINFO 表示用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息(如 DNODE、QNODE等)、存储相关的信息等。默认为可以查看系统信息。 + +例如,创建密码为123456且可以查看系统信息的用户test如下: + +```sql +taos> create user test pass '123456' sysinfo 1; +Query OK, 0 of 0 rows affected (0.001254s) +``` + +## 查看用户 + +```sql +SHOW USERS; +``` + +查看用户信息。 + +```sql +taos> show users; + name | super | enable | sysinfo | create_time | +================================================================================ + test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 | + root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 | +Query OK, 2 rows in database (0.001657s) +``` + +也可以通过查询INFORMATION_SCHEMA.INS_USERS系统表来查看用户信息,例如: + +```sql +taos> select * from information_schema.ins_users; + name | super | enable | sysinfo | create_time | +================================================================================ + test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 | + root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 | +Query OK, 2 rows in database (0.001953s) +``` ## 删除用户 @@ -36,9 +74,15 @@ alter_user_clause: { ``` - PASS:修改用户密码。 -- ENABLE:修改用户是否启用。1表示启用此用户,0表示禁用此用户。 -- SYSINFO:修改用户是否可查看系统信息。1表示可以查看系统信息,0表示不可以查看系统信息。 +- ENABLE:修改用户是否启用。1 表示启用此用户,0 表示禁用此用户。 +- SYSINFO:修改用户是否可查看系统信息。1 表示可以查看系统信息,0 表示不可以查看系统信息。 + +例如,禁用 test 用户: +```sql +taos> alter user test enable 0; +Query OK, 0 of 0 rows affected (0.001160s) +``` ## 授权 @@ -61,7 +105,7 @@ priv_level : { } ``` -对用户授权。 +对用户授权。授权功能只包含在企业版中。 授权级别支持到DATABASE,权限有READ和WRITE两种。 @@ -91,4 +135,4 @@ priv_level : { ``` -收回对用户的授权。 +收回对用户的授权。授权功能只包含在企业版中。 diff --git a/docs/zh/12-taos-sql/26-udf.md b/docs/zh/12-taos-sql/26-udf.md index 7ddcad298b4b9eb4191abded0663055620b741c3..764fde6e1f2e8aa38b90b4b8bc0131c9eaf44da6 100644 --- a/docs/zh/12-taos-sql/26-udf.md +++ b/docs/zh/12-taos-sql/26-udf.md @@ -1,6 +1,7 @@ --- sidebar_label: 自定义函数 title: 用户自定义函数 +description: 使用 UDF 的详细指南 --- 除了 TDengine 的内置函数以外,用户还可以编写自己的函数逻辑并加入TDengine系统中。 diff --git a/docs/zh/12-taos-sql/27-index.md b/docs/zh/12-taos-sql/27-index.md index 2c0907723e76f304566e6a19bdef2d63225f903f..f88c6cf4ffe53ae19926e09c760bedd2997a952d 100644 --- a/docs/zh/12-taos-sql/27-index.md +++ b/docs/zh/12-taos-sql/27-index.md @@ -1,6 +1,7 @@ --- sidebar_label: 索引 title: 使用索引 +description: 索引功能的使用细节 --- TDengine 从 3.0.0.0 版本开始引入了索引功能,支持 SMA 索引和 FULLTEXT 索引。 diff --git a/docs/zh/12-taos-sql/28-recovery.md b/docs/zh/12-taos-sql/28-recovery.md index 72b220b8ff44917831ac16301237702c991b9b15..582c3739073513df4ceb212080805136947e62d4 100644 --- a/docs/zh/12-taos-sql/28-recovery.md +++ b/docs/zh/12-taos-sql/28-recovery.md @@ -1,6 +1,7 @@ --- sidebar_label: 异常恢复 title: 异常恢复 +description: 如何终止出现问题的连接、查询和事务以使系统恢复正常 --- 在一个复杂的应用场景中,连接和查询任务等有可能进入一种错误状态或者耗时过长迟迟无法结束,此时需要有能够终止这些连接或任务的方法。 diff --git a/docs/zh/12-taos-sql/29-changes.md b/docs/zh/12-taos-sql/29-changes.md new file mode 100644 index 0000000000000000000000000000000000000000..d653c59a5cd1309fbdcd6ef7e3706e33c4a43dee --- /dev/null +++ b/docs/zh/12-taos-sql/29-changes.md @@ -0,0 +1,95 @@ +--- +sidebar_label: 3.0 版本语法变更 +title: 3.0 版本语法变更 +description: "TDengine 3.0 版本的语法变更说明" +--- + +## SQL 基本元素变更 + +| # | **元素** | **
    差异性
    ** | **说明** | +| - | :------- | :-------- | :------- | +| 1 | VARCHAR | 新增 | BINARY类型的别名。 +| 2 | TIMESTAMP字面量 | 新增 | 新增支持 TIMESTAMP 'timestamp format' 语法。 +| 3 | _ROWTS伪列 | 新增 | 表示时间戳主键。是_C0伪列的别名。 +| 4 | INFORMATION_SCHEMA | 新增 | 包含各种SCHEMA定义的系统数据库。 +| 5 | PERFORMANCE_SCHEMA | 新增 | 包含运行信息的系统数据库。 +| 6 | 连续查询 | 废除 | 不再支持连续查询。相关的各种语法和接口废除。 +| 7 | 混合运算 | 增强 | 查询中的混合运算(标量运算和矢量运算混合)全面增强,SELECT的各个子句均全面支持符合语法语义的混合运算。 +| 8 | 标签运算 | 新增 |在查询中,标签列可以像普通列一样参与各种运算,用于各种子句。 +| 9 | 时间线子句和时间函数用于超级表查询 | 增强 |没有PARTITION BY时,超级表的数据会被合并成一条时间线。 + +## SQL 语句变更 + +在 TDengine 中,普通表的数据模型中可使用以下数据类型。 + +| # | **语句** | **
    差异性
    ** | **说明** | +| - | :------- | :-------- | :------- | +| 1 | ALTER ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。 +| 2 | ALTER ALL DNODES | 新增 | 修改所有DNODE的参数。 +| 3 | ALTER DATABASE | 调整 | 废除
    • QUORUM:写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。3.0.0版本STRICT暂不支持修改。
    • BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。
    • UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。
    • CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。
    • COMP:3.0版本暂不支持修改。
      新增
    • CACHEMODEL:表示是否在内存中缓存子表的最近数据。
    • CACHESIZE:表示缓存子表最近数据的内存大小。
    • WAL_FSYNC_PERIOD:代替原FSYNC参数。
    • WAL_LEVEL:代替原WAL参数。
      调整
    • REPLICA:3.0.0版本暂不支持修改。
    • KEEP:3.0版本新增支持带单位的设置方式。
    +| 4 | ALTER STABLE | 调整 | 废除
    • CHANGE TAG:修改标签列的名称。3.0版本使用RENAME TAG代替。
      新增
    • RENAME TAG:代替原CHANGE TAG子句。
    • COMMENT:修改超级表的注释。
    +| 5 | ALTER TABLE | 调整 | 废除
    • CHANGE TAG:修改标签列的名称。3.0版本使用RENAME TAG代替。
      新增
    • RENAME TAG:代替原CHANGE TAG子句。
    • COMMENT:修改表的注释。
    • TTL:修改表的生命周期。
    +| 6 | ALTER USER | 调整 | 废除
    • PRIVILEGE:修改用户权限。3.0版本使用GRANT和REVOKE来授予和回收权限。
      新增
    • ENABLE:启用或停用此用户。
    • SYSINFO:修改用户是否可查看系统信息。
    +| 7 | COMPACT VNODES | 暂不支持 | 整理指定VNODE的数据。3.0.0版本暂不支持。 +| 8 | CREATE ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。 +| 9 | CREATE DATABASE | 调整 | 废除
    • BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。
    • CACHE:VNODE使用的内存块的大小。3.0版本使用BUFFER来表示VNODE写入内存池的大小。
    • CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。
    • DAYS:数据文件存储数据的时间跨度。3.0版本使用DURATION代替。
    • FSYNC:当 WAL 设置为 2 时,执行 fsync 的周期。3.0版本使用WAL_FSYNC_PERIOD代替。
    • QUORUM:写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。
    • UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。
    • WAL:WAL 级别。3.0版本使用WAL_LEVEL代替。
      新增
    • BUFFER:一个 VNODE 写入内存池大小。
    • CACHEMODEL:表示是否在内存中缓存子表的最近数据。
    • CACHESIZE:表示缓存子表最近数据的内存大小。
    • DURATION:代替原DAYS参数。新增支持带单位的设置方式。
    • PAGES:一个 VNODE 中元数据存储引擎的缓存页个数。
    • PAGESIZE:一个 VNODE 中元数据存储引擎的页大小。
    • RETENTIONS:表示数据的聚合周期和保存时长。
    • STRICT:表示数据同步的一致性要求。
    • SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表。
    • VGROUPS:数据库中初始VGROUP的数目。
    • WAL_FSYNC_PERIOD:代替原FSYNC参数。
    • WAL_LEVEL:代替原WAL参数。
    • WAL_RETENTION_PERIOD:wal文件的额外保留策略,用于数据订阅。
    • WAL_RETENTION_SIZE:wal文件的额外保留策略,用于数据订阅。
    • WAL_ROLL_PERIOD:wal文件切换时长。
    • WAL_SEGMENT_SIZE:wal单个文件大小。
      调整
    • KEEP:3.0版本新增支持带单位的设置方式。
    +| 10 | CREATE DNODE | 调整 | 新增主机名和端口号分开指定语法
    • CREATE DNODE dnode_host_name PORT port_val
    +| 11 | CREATE INDEX | 新增 | 创建SMA索引。 +| 12 | CREATE MNODE | 新增 | 创建管理节点。 +| 13 | CREATE QNODE | 新增 | 创建查询节点。 +| 14 | CREATE STABLE | 调整 | 新增表参数语法
  • COMMENT:表注释。
  • +| 15 | CREATE STREAM | 新增 | 创建流。 +| 16 | CREATE TABLE | 调整 | 新增表参数语法
    • COMMENT:表注释。
    • WATERMARK:指定窗口的关闭时间。
    • MAX_DELAY:用于控制推送计算结果的最大延迟。
    • ROLLUP:指定的聚合函数,提供基于多层级的降采样聚合结果。
    • SMA:提供基于数据块的自定义预计算功能。
    • TTL:用来指定表的生命周期的参数。
    +| 17 | CREATE TOPIC | 新增 | 创建订阅主题。 +| 18 | DROP ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。 +| 19 | DROP CONSUMER GROUP | 新增 | 删除消费组。 +| 20 | DROP INDEX | 新增 | 删除索引。 +| 21 | DROP MNODE | 新增 | 创建管理节点。 +| 22 | DROP QNODE | 新增 | 创建查询节点。 +| 23 | DROP STREAM | 新增 | 删除流。 +| 24 | DROP TABLE | 调整 | 新增批量删除语法 +| 25 | DROP TOPIC | 新增 | 删除订阅主题。 +| 26 | EXPLAIN | 新增 | 查看查询语句的执行计划。 +| 27 | GRANT | 新增 | 授予用户权限。 +| 28 | KILL TRANSACTION | 新增 | 终止管理节点的事务。 +| 29 | KILL STREAM | 废除 | 终止连续查询。3.0版本不再支持连续查询,而是用更通用的流计算来代替。 +| 30 | MERGE VGROUP | 新增 | 合并VGROUP。 +| 31 | REVOKE | 新增 | 回收用户权限。 +| 32 | SELECT | 调整 |
    • SELECT关闭隐式结果列,输出列均需要由SELECT子句来指定。
    • DISTINCT功能全面支持。2.x版本只支持对标签列去重,并且不可以和JOIN、GROUP BY等子句混用。
    • JOIN功能增强。增加支持:JOIN后WHERE条件中有OR条件;JOIN后的多表运算;JOIN后的多表GROUP BY。
    • FROM后子查询功能大幅增强。不限制子查询嵌套层数;支持子查询和UNION ALL混合使用;移除其他一些之前版本的语法限制。
    • WHERE后可以使用任意的标量表达式。
    • GROUP BY功能增强。支持任意标量表达式及其组合的分组。
    • SESSION可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
    • STATE_WINDOW可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
    • ORDER BY功能大幅增强。不再必须和GROUP BY子句一起使用;不再有排序表达式个数的限制;增加支持NULLS FIRST/LAST语法功能;支持符合语法语义的任意表达式。
    • 新增PARTITION BY语法。替代原来的GROUP BY tags。
    +| 33 | SHOW ACCOUNTS | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。 +| 34 | SHOW APPS |新增 | 显示接入集群的应用(客户端)信息。 +| 35 | SHOW CONSUMERS | 新增 | 显示当前数据库下所有活跃的消费者的信息。 +| 36 | SHOW DATABASES | 调整 | 3.0版本只显示数据库名。 +| 37 | SHOW FUNCTIONS | 调整 | 3.0版本只显示自定义函数名。 +| 38 | SHOW LICENCE | 新增 | 和SHOW GRANTS 命令等效。 +| 39 | SHOW INDEXES | 新增 | 显示已创建的索引。 +| 40 | SHOW LOCAL VARIABLES | 新增 | 显示当前客户端配置参数的运行值。 +| 41 | SHOW MODULES | 废除 | 显示当前系统中所安装的组件的信息。 +| 42 | SHOW QNODES | 新增 | 显示当前系统中QNODE的信息。 +| 43 | SHOW STABLES | 调整 | 3.0版本只显示超级表名。 +| 44 | SHOW STREAMS | 调整 | 2.x版本此命令显示系统中已创建的连续查询的信息。3.0版本废除了连续查询,用流代替。此命令显示已创建的流。 +| 45 | SHOW SUBSCRIPTIONS | 新增 | 显示当前数据库下的所有的订阅关系 +| 46 | SHOW TABLES | 调整 | 3.0版本只显示表名。 +| 47 | SHOW TABLE DISTRIBUTED | 新增 | 显示表的数据分布信息。代替2.x版本中的SELECT _block_dist() FROM { tb_name | stb_name }方式。 +| 48 | SHOW TOPICS | 新增 | 显示当前数据库下的所有订阅主题。 +| 49 | SHOW TRANSACTIONS | 新增 | 显示当前系统中正在执行的事务的信息。 +| 50 | SHOW DNODE VARIABLES | 新增 |显示指定DNODE的配置参数。 +| 51 | SHOW VNODES | 暂不支持 | 显示当前系统中VNODE的信息。3.0.0版本暂不支持。 +| 52 | SPLIT VGROUP | 新增 | 拆分VGROUP。 +| 53 | TRIM DATABASE | 新增 | 删除过期数据,并根据多级存储的配置归整数据。 + +## SQL 函数变更 + +| # | **函数** | **
    差异性
    ** | **说明** | +| - | :------- | :-------- | :------- | +| 1 | TWA | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。 +| 2 | IRATE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。 +| 3 | LEASTSQUARES | 增强 | 可以用于超级表了。 +| 4 | ELAPSED | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。 +| 5 | DIFF | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。 +| 6 | DERIVATIVE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。 +| 7 | CSUM | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。 +| 8 | MAVG | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。 +| 9 | SAMPLE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。 +| 10 | STATECOUNT | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。 +| 11 | STATEDURATION | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。 diff --git a/docs/zh/12-taos-sql/index.md b/docs/zh/12-taos-sql/index.md index 900fff1ba250198d03ff1d8f37261c36f7efa150..739d26b2240ddfcf32a269015f5c8915f4854f33 100644 --- a/docs/zh/12-taos-sql/index.md +++ b/docs/zh/12-taos-sql/index.md @@ -1,11 +1,11 @@ --- -title: TAOS SQL -description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容" +title: TDengine SQL +description: 'TDengine SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容' --- -本文档说明 TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。 +本文档说明 TDengine SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。TDengine 3.0 版本相比 2.x 版本做了大量改进和优化,特别是查询引擎进行了彻底的重构,因此 SQL 语法相比 2.x 版本有很多变更。详细的变更内容请见 [3.0 版本语法变更](/taos-sql/changes) 章节 -TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 提供标准的 SQL 语法,并针对时序数据和业务的特点优化和新增了许多语法和功能。TAOS SQL 语句的最大长度为 1M。TAOS SQL 不支持关键字的缩写,例如 DELETE 不能缩写为 DEL。 +TDengine SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TDengine SQL 提供标准的 SQL 语法,并针对时序数据和业务的特点优化和新增了许多语法和功能。TDengine SQL 语句的最大长度为 1M。TDengine SQL 不支持关键字的缩写,例如 DELETE 不能缩写为 DEL。 本章节 SQL 语法遵循如下约定: diff --git a/docs/zh/12-taos-sql/watermark.webp b/docs/zh/12-taos-sql/watermark.webp new file mode 100644 index 0000000000000000000000000000000000000000..3307faccffdaaec6dddf5cad8b7c11016fd28bd4 Binary files /dev/null and b/docs/zh/12-taos-sql/watermark.webp differ diff --git a/docs/zh/14-reference/02-rest-api/_category_.yml b/docs/zh/14-reference/02-rest-api/_category_.yml deleted file mode 100644 index 57a20d8458e937f60c41806be4392ebb2d13e0f7..0000000000000000000000000000000000000000 --- a/docs/zh/14-reference/02-rest-api/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: REST API diff --git a/docs/zh/14-reference/04-taosadapter.md b/docs/zh/14-reference/04-taosadapter.md index 9baafb9b9582445280d5c73c891694e2134d15fb..71bf5f4223ae97cf2c1153aaea3b8f946e213522 100644 --- a/docs/zh/14-reference/04-taosadapter.md +++ b/docs/zh/14-reference/04-taosadapter.md @@ -30,7 +30,7 @@ taosAdapter 提供以下功能: ### 安装 taosAdapter -taosAdapter 从 TDengine v2.4.0.0 版本开始成为 TDengine 服务端软件 的一部分,如果您使用 TDengine server 您不需要任何额外的步骤来安装 taosAdapter。您可以从[涛思数据官方网站](https://taosdata.com/cn/all-downloads/)下载 TDengine server(taosAdapter 包含在 v2.4.0.0 及以上版本)安装包。如果需要将 taosAdapter 分离部署在 TDengine server 之外的服务器上,则应该在该服务器上安装完整的 TDengine 来安装 taosAdapter。如果您需要使用源代码编译生成 taosAdapter,您可以参考[构建 taosAdapter](https://github.com/taosdata/taosadapter/blob/3.0/BUILD-CN.md)文档。 +taosAdapter 是 TDengine 服务端软件 的一部分,如果您使用 TDengine server 您不需要任何额外的步骤来安装 taosAdapter。您可以从[涛思数据官方网站](https://taosdata.com/cn/all-downloads/)下载 TDengine server 安装包。如果需要将 taosAdapter 分离部署在 TDengine server 之外的服务器上,则应该在该服务器上安装完整的 TDengine 来安装 taosAdapter。如果您需要使用源代码编译生成 taosAdapter,您可以参考[构建 taosAdapter](https://github.com/taosdata/taosadapter/blob/3.0/BUILD-CN.md)文档。 ### start/stop taosAdapter @@ -156,7 +156,7 @@ AllowWebSockets ## 功能列表 - RESTful 接口 - [https://docs.taosdata.com/reference/rest-api/](https://docs.taosdata.com/reference/rest-api/) + [RESTful API](../../connector/rest-api) - 兼容 InfluxDB v1 写接口 [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/) - 兼容 OpenTSDB JSON 和 telnet 格式写入 @@ -179,7 +179,7 @@ AllowWebSockets ### TDengine RESTful 接口 -您可以使用任何支持 http 协议的客户端通过访问 RESTful 接口地址 `http://:6041/rest/sql` 来写入数据到 TDengine 或从 TDengine 中查询数据。细节请参考[官方文档](/reference/rest-api/)。 +您可以使用任何支持 http 协议的客户端通过访问 RESTful 接口地址 `http://:6041/rest/sql` 来写入数据到 TDengine 或从 TDengine 中查询数据。细节请参考[官方文档](../../connector/rest-api/)。 ### InfluxDB @@ -329,4 +329,4 @@ taosAdapter 通过参数 `restfulRowLimit` 来控制结果的返回条数,-1 | 3 | telegrafUseFieldNum | 请参考 taosAdapter telegraf 配置方法 | | 4 | restfulRowLimit | restfulRowLimit | 内嵌 httpd 默认输出 10240 行数据,最大允许值为 102400。taosAdapter 也提供 restfulRowLimit 但是默认不做限制。您可以根据实际场景需求进行配置 | | 5 | httpDebugFlag | 不适用 | httpdDebugFlag 对 taosAdapter 不起作用 | -| 6 | httpDBNameMandatory | 不适用 | taosAdapter 要求 URL 中必须指定数据库名 | \ No newline at end of file +| 6 | httpDBNameMandatory | 不适用 | taosAdapter 要求 URL 中必须指定数据库名 | diff --git a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp index a78e18028a94c2f6a783b08d992a25c791527407..3bc0d960f1db45ee8d2adcee26de89334e681956 100644 Binary files a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp and b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp differ diff --git a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp index b152418d0902b8ebdf62ebce6705c10dd5ab4fbf..f5a602d3f9dcecb64ded5e1f463ba460daab0024 100644 Binary files a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp and b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp differ diff --git a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp index f58f48b7f17375cb8e62e7c0126ca3aea56a13f6..f155fa42a0fb5df71ee48c8c65a8c7d8851ddc3e 100644 Binary files a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp and b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp differ diff --git a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp index 00afcce013602dce0da17bfd033f65aaa8e43bb7..dc0b85e262bd4340e986a42105e0ff9838d12fa6 100644 Binary files a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp and b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp differ diff --git a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-5-database.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-5-database.webp index 567e5694f9d7a035a3eb354493d3df8ed64db251..342c8cfc0a8e852e7cd092aff453ed1fd2ec85a2 100644 Binary files a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-5-database.webp and b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-5-database.webp differ diff --git a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp index 8666193f59497180574fd2786266e5baabbe9761..942130d4fabf7944c7add10acb3bb42ca7f51e0f 100644 Binary files a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp and b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp differ diff --git a/docs/zh/14-reference/07-tdinsight/assets/howto-add-datasource.webp b/docs/zh/14-reference/07-tdinsight/assets/howto-add-datasource.webp index 06d0ff6ed50091a6340508bc5b2b3f78b65dcb18..d7fc9e233acd1a4b1bbb940b13bc4296c261a33a 100644 Binary files a/docs/zh/14-reference/07-tdinsight/assets/howto-add-datasource.webp and b/docs/zh/14-reference/07-tdinsight/assets/howto-add-datasource.webp differ diff --git a/docs/zh/14-reference/07-tdinsight/assets/import_dashboard.webp b/docs/zh/14-reference/07-tdinsight/assets/import_dashboard.webp index fb7958f1b9fbd43c8f63136024842790e711c490..ae2a1e8e9b7b63a68d56dfcd2187eca614da9a3d 100644 Binary files a/docs/zh/14-reference/07-tdinsight/assets/import_dashboard.webp and b/docs/zh/14-reference/07-tdinsight/assets/import_dashboard.webp differ diff --git a/docs/zh/14-reference/07-tdinsight/assets/import_dashboard_view.webp b/docs/zh/14-reference/07-tdinsight/assets/import_dashboard_view.webp new file mode 100644 index 0000000000000000000000000000000000000000..1b10e41c75fbbb9a30bce4aa8d1adb8216fbe127 Binary files /dev/null and b/docs/zh/14-reference/07-tdinsight/assets/import_dashboard_view.webp differ diff --git a/docs/zh/14-reference/07-tdinsight/assets/select_dashboard_db.webp b/docs/zh/14-reference/07-tdinsight/assets/select_dashboard_db.webp new file mode 100644 index 0000000000000000000000000000000000000000..956132e37e9df255d3ff82654fd357bec001e695 Binary files /dev/null and b/docs/zh/14-reference/07-tdinsight/assets/select_dashboard_db.webp differ diff --git a/docs/zh/14-reference/07-tdinsight/index.md b/docs/zh/14-reference/07-tdinsight/index.mdx similarity index 67% rename from docs/zh/14-reference/07-tdinsight/index.md rename to docs/zh/14-reference/07-tdinsight/index.mdx index 5990a831b8bc1788deaddfb38f717f2723969362..ecd63621432794e27fd80b88e864590c83e9b333 100644 --- a/docs/zh/14-reference/07-tdinsight/index.md +++ b/docs/zh/14-reference/07-tdinsight/index.mdx @@ -1,21 +1,31 @@ --- -title: TDinsight - 基于Grafana的TDengine零依赖监控解决方案 +title: TDinsight sidebar_label: TDinsight +description: 基于Grafana的TDengine零依赖监控解决方案 --- -TDinsight 是使用内置监控数据库和 [Grafana] 对 TDengine 进行监控的解决方案。 +TDinsight 是使用监控数据库和 [Grafana] 对 TDengine 进行监控的解决方案。 -TDengine 启动后,会自动创建一个监测数据库 `log`,并自动将服务器的 CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库,并对重要的系统操作(比如登录、创建、删除数据库等)以及各种错误报警信息进行记录。通过 [Grafana] 和 [TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases),TDinsight 将集群状态、节点信息、插入及查询请求、资源使用情况等进行可视化展示,同时还支持 vnode、dnode、mnode 节点状态异常告警,为开发者实时监控 TDengine 集群运行状态提供了便利。本文将指导用户安装 Grafana 服务器并通过 `TDinsight.sh` 安装脚本自动安装 TDengine 数据源插件及部署 TDinsight 可视化面板。 +TDengine 通过 [taosKeeper](../taosKeeper) 将服务器的 CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入指定数据库,并对重要的系统操作(比如登录、创建、删除数据库等)以及各种错误报警信息进行记录。通过 [Grafana] 和 [TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases),TDinsight 将集群状态、节点信息、插入及查询请求、资源使用情况等进行可视化展示,同时还支持 vnode、dnode、mnode 节点状态异常告警,为开发者实时监控 TDengine 集群运行状态提供了便利。本文将指导用户安装 Grafana 服务器并通过 `TDinsight.sh` 安装脚本自动安装 TDengine 数据源插件及部署 TDinsight 可视化面板。 ## 系统要求 -要部署 TDinsight,需要一个单节点的 TDengine 服务器或一个多节点的 [TDengine] 集群,以及一个[Grafana]服务器。此仪表盘需要 TDengine 2.3.3.0 及以上,并启用 `log` 数据库(`monitor = 1`)。 +- 单节点的 TDengine 服务器或多节点的 [TDengine] 集群,以及一个[Grafana]服务器。此仪表盘需要 TDengine 3.0.0.0 及以上,并开启监控服务,具体配置请参考:[TDengine 监控配置](../config/#监控相关)。 +- taosAdapter 已经安装并正常运行。具体细节请参考:[taosAdapter 使用手册](../taosadapter) +- taosKeeper 已安装并正常运行。具体细节请参考:[taosKeeper 使用手册](../taosKeeper) + +记录以下信息: + +- taosAdapter 集群 REST API 地址,如:`http://tdengine.local:6041`。 +- taosAdapter 集群认证信息,可使用用户名及密码。 +- taosKeeper 记录监控指标的数据库名称。 ## 安装 Grafana -我们建议在此处使用最新的[Grafana] 7 或 8 版本。您可以在任何[支持的操作系统](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems)中,按照 [Grafana 官方文档安装说明](https://grafana.com/docs/grafana/latest/installation/) 安装 [Grafana]。 +我们建议在此处使用最新的[Grafana] 8 或 9 版本。您可以在任何[支持的操作系统](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems)中,按照 [Grafana 官方文档安装说明](https://grafana.com/docs/grafana/latest/installation/) 安装 [Grafana]。 -### 在 Debian 或 Ubuntu 上安装 Grafana + + 对于 Debian 或 Ubuntu 操作系统,建议使用 Grafana 镜像仓库。使用如下命令从零开始安装: @@ -31,6 +41,8 @@ sudo apt-get install grafana ``` ### 在 CentOS / RHEL 上安装 Grafana + + 您可以从官方 YUM 镜像仓库安装。 @@ -59,7 +71,12 @@ sudo yum install \ https://dl.grafana.com/oss/release/grafana-7.5.11-1.x86_64.rpm ``` -## 自动部署 TDinsight + + + + + + 我们提供了一个自动化安装脚本 [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) 脚本以便用户快速进行安装配置。 @@ -71,7 +88,7 @@ chmod +x TDinsight.sh ./TDinsight.sh ``` -这个脚本会自动下载最新的[Grafana TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases/latest) 和 [TDinsight 仪表盘](https://grafana.com/grafana/dashboards/15167) ,将命令行选项中的可配置参数转为 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置文件,以进行自动化部署及更新等操作。利用该脚本提供的告警设置选项,你还可以获得内置的阿里云短信告警通知支持。 +这个脚本会自动下载最新的[Grafana TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases/latest) 和 [TDinsight 仪表盘](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) ,将命令行选项中的可配置参数转为 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置文件,以进行自动化部署及更新等操作。利用该脚本提供的告警设置选项,你还可以获得内置的阿里云短信告警通知支持。 假设您在同一台主机上使用 TDengine 和 Grafana 的默认服务。运行 `./TDinsight.sh` 并打开 Grafana 浏览器窗口就可以看到 TDinsight 仪表盘了。 @@ -106,18 +123,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste -E, --external-notifier Apply external notifier uid to TDinsight dashboard. -Aliyun SMS as Notifier: --s, --sms-enabled To enable tdengine-datasource plugin builtin Aliyun SMS webhook. --N, --sms-notifier-name Provisioning notifier name.[default: TDinsight Builtin SMS] --U, --sms-notifier-uid Provisioning notifier uid, use lowercase notifier name by default. --D, --sms-notifier-is-default Set notifier as default. --I, --sms-access-key-id Aliyun SMS access key id --K, --sms-access-key-secret Aliyun SMS access key secret --S, --sms-sign-name Sign name --C, --sms-template-code Template code --T, --sms-template-param Template param, a escaped JSON string like '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}' --B, --sms-phone-numbers Comma-separated numbers list, eg "189xxxxxxxx,132xxxxxxxx" --L, --sms-listen-addr [default: 127.0.0.1:9100] ``` 大多数命令行选项都可以通过环境变量获得同样的效果。 @@ -136,17 +141,6 @@ Aliyun SMS as Notifier: | -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight 仪表盘标题。 [默认:TDinsight] | | -e | --tdinsight-可编辑 | TDINSIGHT_DASHBOARD_EDITABLE | 如果配置仪表盘可以编辑。 [默认值:false] | | -E | --external-notifier | EXTERNAL_NOTIFIER | 将外部通知程序 uid 应用于 TDinsight 仪表盘。 | -| -s | --sms-enabled | SMS_ENABLED | 启用阿里云短信 webhook 内置的 tdengine-datasource 插件。 | -| -N | --sms-notifier-name | SMS_NOTIFIER_NAME | 供应通知程序名称。[默认:`TDinsight Builtin SMS`] | -| -U | --sms-notifier-uid | SMS_NOTIFIER_UID | "Notification Channel" `uid`,默认使用程序名称的小写,其他字符用 “-” 代替。 | -| -D | --sms-notifier-is-default | SMS_NOTIFIER_IS_DEFAULT | 将内置短信通知设置为默认值。 | -| -I | --sms-access-key-id | SMS_ACCESS_KEY_ID | 阿里云短信访问密钥 id | -| -K | --sms-access-key-secret | SMS_ACCESS_KEY_SECRET | 阿里云短信访问秘钥 | -| -S | --sms-sign-name | SMS_SIGN_NAME | 签名 | -| -C | --sms-template-code | SMS_TEMPLATE_CODE | 模板代码 | -| -T | --sms-template-param | SMS_TEMPLATE_PARAM | 模板参数的 JSON 模板 | -| -B | --sms-phone-numbers | SMS_PHONE_NUMBERS | 逗号分隔的手机号列表,例如`"189xxxxxxxx,132xxxxxxxx"` | -| -L | --sms-listen-addr | SMS_LISTEN_ADDR | 内置 SMS webhook 监听地址,默认为`127.0.0.1:9100` | 假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本: @@ -166,31 +160,18 @@ curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifica sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier ``` -如果你想使用[阿里云短信](https://www.aliyun.com/product/sms)服务作为通知渠道,你应该使用`-s`标志启用并添加以下参数: - -- `-N`:Notification Channel 名,默认为`TDinsight Builtin SMS`。 -- `-U`:Channel uid,默认是 `name` 的小写,任何其他字符都替换为 - ,对于默认的 `-N`,其 uid 为 `tdinsight-builtin-sms`。 -- `-I`:阿里云短信访问密钥 id。 -- `-K`:阿里云短信访问秘钥。 -- `-S`:阿里云短信签名。 -- `-C`:阿里云短信模板 ID。 -- `-T`:阿里云短信模板参数,为 JSON 格式模板,示例如下 `'{"alarm_level":"%s","time":"%s","name":"%s","content":"%s "}'`。有四个参数:告警级别、时间、名称和告警内容。 -- `-B`:电话号码列表,以逗号`,`分隔。 - 如果要监控多个 TDengine 集群,则需要设置多个 TDinsight 仪表盘。设置非默认 TDinsight 需要进行一些更改: `-n` `-i` `-t` 选项需要更改为非默认名称,如果使用 内置短信告警功能,`-N` 和 `-L` 也应该改变。 ```bash sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1' -# 如果使用内置短信通知 -sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1' \ - -s -N 'Env1 SMS' -I xx -K xx -S xx -C SMS_XX -T '' -B 00000000000 -L 127.0.0.01:10611 ``` 请注意,配置数据源、通知 Channel 和仪表盘在前端是不可更改的。您应该再次通过此脚本更新配置或手动更改 `/etc/grafana/provisioning` 目录(这是 Grafana 的默认目录,根据需要使用`-P`选项更改)中的配置文件。 特别地,当您使用 Grafana Cloud 或其他组织时,`-O` 可用于设置组织 ID。 `-G` 可指定 Grafana 插件安装目录。 `-e` 参数将仪表盘设置为可编辑。 -## 手动设置 TDinsight + + ### 安装 TDengine 数据源插件 @@ -247,23 +228,30 @@ sudo systemctl enable grafana-server ![TDengine Database TDinsight 数据源测试](./assets/howto-add-datasource-test.webp) + + + ### 导入仪表盘 -指向 **+** / **Create** - **import**(或 `/dashboard/import` url)。 +在配置 TDengine 数据源界面,点击 **Dashboards** tab。 ![TDengine Database TDinsight 导入仪表盘和配置](./assets/import_dashboard.webp) -在 **Import via grafana.com** 位置键入仪表盘 ID `15167` 并 **Load**。 +选择 `TDengine for 3.x`,并点击 `import`。 + +导入完成后,在搜索界面已经出现了 **TDinsight for 3.x** dashboard。 + +![TDengine Database TDinsight 查看导入结果](./assets/import_dashboard_view.webp) -![通过 grafana.com 导入](./assets/import-dashboard-15167.webp) +进入 TDinsight for 3.x dashboard 后,选择 taosKeeper 中设置的记录监控指标的数据库。 -导入完成后,TDinsight 的完整页面视图如下所示。 +![TDengine Database TDinsight 选择数据库](./assets/select_dashboard_db.webp) -![TDengine Database TDinsight 显示](./assets/TDinsight-full.webp) +然后可以看到监控结果。 ## TDinsight 仪表盘详细信息 -TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes, vnodes](https://www.taosdata.com/cn/documentation/architecture#cluster)或数据库的使用情况和状态。 +TDinsight 仪表盘旨在提供 TDengine 相关资源的使用情况和状态,比如 dnodes、 mnodes、 vnodes 和数据库等。 指标详情如下: @@ -285,7 +273,6 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes - **Measuring Points Used**:启用告警规则的测点数用量(社区版无数据,默认情况下是健康的)。 - **Grants Expire Time**:启用告警规则的企业版过期时间(社区版无数据,默认情况是健康的)。 - **Error Rate**:启用警报的集群总合错误率(每秒平均错误数)。 -- **Variables**:`show variables` 表格展示。 ### DNodes 状态 @@ -294,7 +281,6 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes - **DNodes Status**:`show dnodes` 的简单表格视图。 - **DNodes Lifetime**:从创建 dnode 开始经过的时间。 - **DNodes Number**:DNodes 数量变化。 -- **Offline Reason**:如果有任何 dnode 状态为离线,则以饼图形式展示离线原因。 ### MNode 概述 @@ -309,7 +295,6 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes 1. **Requests Rate(Inserts per Second)**:平均每秒插入次数。 2. **Requests (Selects)**:查询请求数及变化率(count of second)。 -3. **Requests (HTTP)**:HTTP 请求数和请求速率(count of second)。 ### 数据库 @@ -319,9 +304,8 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes 1. **STables**:超级表数量。 2. **Total Tables**:所有表数量。 -3. **Sub Tables**:所有超级表子表的数量。 -4. **Tables**:所有普通表数量随时间变化图。 -5. **Tables Number Foreach VGroups**:每个 VGroups 包含的表数量。 +3. **Tables**:所有普通表数量随时间变化图。 +4. **Tables Number Foreach VGroups**:每个 VGroups 包含的表数量。 ### DNode 资源使用情况 @@ -356,12 +340,11 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes 支持监控 taosAdapter 请求统计和状态详情。包括: -1. **http_request**: 包含总请求数,请求失败数以及正在处理的请求数 -2. **top 3 request endpoint**: 按终端分组,请求排名前三的数据 -3. **Memory Used**: taosAdapter 内存使用情况 -4. **latency_quantile(ms)**: (1, 2, 5, 9, 99)阶段的分位数 -5. **top 3 failed request endpoint**: 按终端分组,请求失败排名前三的数据 -6. **CPU Used**: taosAdapter CPU 使用情况 +1. **http_request_inflight**: 即时处理请求数 +2. **http_request_total**: 请求总数。 +3. **http_request_fail**: 请求总数。 +4. **CPU Used**: taosAdapter CPU 使用情况。 +5. **Memory Used**: taosAdapter 内存使用情况。 ## 升级 @@ -403,13 +386,6 @@ services: TDENGINE_API: ${TDENGINE_API} TDENGINE_USER: ${TDENGINE_USER} TDENGINE_PASS: ${TDENGINE_PASS} - SMS_ACCESS_KEY_ID: ${SMS_ACCESS_KEY_ID} - SMS_ACCESS_KEY_SECRET: ${SMS_ACCESS_KEY_SECRET} - SMS_SIGN_NAME: ${SMS_SIGN_NAME} - SMS_TEMPLATE_CODE: ${SMS_TEMPLATE_CODE} - SMS_TEMPLATE_PARAM: '${SMS_TEMPLATE_PARAM}' - SMS_PHONE_NUMBERS: $SMS_PHONE_NUMBERS - SMS_LISTEN_ADDR: ${SMS_LISTEN_ADDR} ports: - 3000:3000 volumes: diff --git a/docs/zh/14-reference/08-taos-shell.md b/docs/zh/14-reference/08-taos-shell.md index 2f3b551502c8b9da789220b1b20e701e038dc5e7..580454987840b61a5efff4acd545443ebca9904b 100644 --- a/docs/zh/14-reference/08-taos-shell.md +++ b/docs/zh/14-reference/08-taos-shell.md @@ -8,7 +8,7 @@ TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine ## 安装 -如果在 TDengine 服务器端执行,无需任何安装,已经自动安装好 TDengine CLI。如果要在非 TDengine 服务器端运行,需要安装 TDengine 客户端驱动安装包,具体安装,请参考 [连接器](/reference/connector/)。 +如果在 TDengine 服务器端执行,无需任何安装,已经自动安装好 TDengine CLI。如果要在非 TDengine 服务器端运行,需要安装 TDengine 客户端驱动安装包,具体安装,请参考 [连接器](../../connector/)。 ## 执行 @@ -18,7 +18,7 @@ TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine taos ``` -如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息。(请参考 [FAQ](/train-faq/faq) 来解决终端连接服务端失败的问题)。TDengine CLI 的提示符号如下: +如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息。(请参考 [FAQ](../../train-faq/faq) 来解决终端连接服务端失败的问题)。TDengine CLI 的提示符号如下: ```cmd taos> diff --git a/docs/zh/14-reference/11-docker/index.md b/docs/zh/14-reference/11-docker/index.md index c03990ede293962b46b77e68825e68d1f3564ecc..58bbe1e1178fbb1a1aa649508b0e36b331964753 100644 --- a/docs/zh/14-reference/11-docker/index.md +++ b/docs/zh/14-reference/11-docker/index.md @@ -25,13 +25,14 @@ curl -u root:taosdata -d "show databases" localhost:6041/rest/sql $ docker exec -it tdengine taos taos> show databases; - name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status | -==================================================================================================================================================================================================================================================================================== - log | 2022-01-17 13:57:22.270 | 10 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready | -Query OK, 1 row(s) in set (0.002843s) + name | +================================= + information_schema | + performance_schema | +Query OK, 2 rows in database (0.033802s) ``` -因为运行在容器中的 TDengine 服务端使用容器的 hostname 建立连接,使用 taos shell 或者各种连接器(例如 JDBC-JNI)从容器外访问容器内的 TDengine 比较复杂,所以上述方式是访问容器中 TDengine 服务的最简单的方法,适用于一些简单场景。如果在一些复杂场景下想要从容器化使用 taos shell 或者各种连接器访问容器中的 TDengine 服务,请参考下一节。 +因为运行在容器中的 TDengine 服务端使用容器的 hostname 建立连接,使用 TDengine CLI 或者各种连接器(例如 JDBC-JNI)从容器外访问容器内的 TDengine 比较复杂,所以上述方式是访问容器中 TDengine 服务的最简单的方法,适用于一些简单场景。如果在一些复杂场景下想要从容器化使用 TDengine CLI 或者各种连接器访问容器中的 TDengine 服务,请参考下一节。 ## 在 host 网络上启动 TDengine @@ -45,10 +46,11 @@ docker run -d --name tdengine --network host tdengine/tdengine $ taos taos> show dnodes; - id | end_point | vnodes | cores | status | role | create_time | offline reason | -====================================================================================================================================== - 1 | myhost:6030 | 1 | 8 | ready | any | 2022-01-17 22:10:32.619 | | -Query OK, 1 row(s) in set (0.003233s) + id | endpoint | vnodes | support_vnodes | status | create_time | note | +================================================================================================================================================= + 1 | vm98:6030 | 0 | 32 | ready | 2022-08-19 14:50:05.337 | | +Query OK, 1 rows in database (0.010654s) + ``` ## 以指定的 hostname 和 port 启动 TDengine @@ -59,12 +61,13 @@ Query OK, 1 row(s) in set (0.003233s) docker run -d \ --name tdengine \ -e TAOS_FQDN=tdengine \ - -p 6030-6049:6030-6049 \ - -p 6030-6049:6030-6049/udp \ + -p 6030:6030 \ + -p 6041-6049:6041-6049 \ + -p 6041-6049:6041-6049/udp \ tdengine/tdengine ``` -上面的命令在容器中启动一个 TDengine 服务,其所监听的 hostname 为 tdengine ,并将容器的 6030 到 6049 端口段映射到主机的 6030 到 6049 端口段 (tcp 和 udp 都需要映射)。如果主机上该端口段已经被占用,可以修改上述命令指定一个主机上空闲的端口段。如果 `rpcForceTcp` 被设置为 `1` ,可以只映射 tcp 协议。 +上面的命令在容器中启动一个 TDengine 服务,其所监听的 hostname 为 tdengine ,并将容器的 6030 端口映射到主机的 6030 端口(TCP,只能映射主机 6030 端口),6041-6049 端口段映射到主机 6041-6049 端口段(tcp 和 udp 都需要映射,如果主机上该端口段已经被占用,可以修改上述命令指定一个主机上空闲的端口段)。 接下来,要确保 "tdengine" 这个 hostname 在 `/etc/hosts` 中可解析。 @@ -72,7 +75,7 @@ docker run -d \ echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts ``` -最后,可以从 taos shell 或者任意连接器以 "tdengine" 为服务端地址访问 TDengine 服务。 +最后,可以从 TDengine CLI 或者任意连接器以 "tdengine" 为服务端地址访问 TDengine 服务。 ```shell taos -h tdengine -P 6030 @@ -103,9 +106,9 @@ taos -h tdengine -P 6030 3. 在同一网络上的另一容器中启动 TDengine 客户端 ```shell - docker run --rm -it --network td-net -e TAOS_FIRST_EP=tdengine tdengine/tdengine taos + docker run --rm -it --network td-net -e TAOS_FIRST_EP=tdengine --entrypoint=taos tdengine/tdengine # or - #docker run --rm -it --network td-net -e tdengine/tdengine taos -h tdengine + #docker run --rm -it --network td-net --entrypoint=taos tdengine/tdengine -h tdengine ``` ## 在容器中启动客户端应用 @@ -115,8 +118,8 @@ taos -h tdengine -P 6030 ```docker FROM ubuntu:20.04 RUN apt-get update && apt-get install -y wget -ENV TDENGINE_VERSION=2.4.0.0 -RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ +ENV TDENGINE_VERSION=3.0.0.0 +RUN wget -c https://www.tdengine.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && cd TDengine-client-${TDENGINE_VERSION} \ && ./install_client.sh \ @@ -129,6 +132,14 @@ RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_ 以下是一个 go 应用程序的示例: +* 创建 go mod 项目: + +```bash +go mod init app +``` + +* 创建 main.go: + ```go /* * In this test program, we'll create a database and insert 4 records then select out. @@ -212,12 +223,18 @@ func checkErr(err error, prompt string) { } ``` -如下是完整版本的 dockerfile +* 更新 go mod -```docker -FROM golang:1.17.6-buster as builder -ENV TDENGINE_VERSION=2.4.0.0 -RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ +```bash +go mod tidy +``` + +如下是完整版本的 dockerfile: + +```dockerfile +FROM golang:1.19.0-buster as builder +ENV TDENGINE_VERSION=3.0.0.0 +RUN wget -c https://www.tdengine.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && cd TDengine-client-${TDENGINE_VERSION} \ && ./install_client.sh \ @@ -232,8 +249,8 @@ RUN go build FROM ubuntu:20.04 RUN apt-get update && apt-get install -y wget -ENV TDENGINE_VERSION=2.4.0.0 -RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ +ENV TDENGINE_VERSION=3.0.0.0 +RUN wget -c https://www.tdengine.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && cd TDengine-client-${TDENGINE_VERSION} \ && ./install_client.sh \ @@ -248,113 +265,112 @@ CMD ["app"] 目前我们已经有了 `main.go`, `go.mod`, `go.sum`, `app.dockerfile`, 现在可以构建出这个应用程序并在 `td-net` 网络上启动它 ```shell -$ docker build -t app -f app.dockerfile -$ docker run --rm --network td-net app -h tdengine -p 6030 +$ docker build -t app -f app.dockerfile . +$ docker run --rm --network td-net app app -h tdengine -p 6030 ============= args parse result: ============= hostName: tdengine serverPort: 6030 usr: root password: taosdata ================================================ -2022-01-17 15:56:55.48 +0000 UTC 0 -2022-01-17 15:56:56.48 +0000 UTC 1 -2022-01-17 15:56:57.48 +0000 UTC 2 -2022-01-17 15:56:58.48 +0000 UTC 3 -2022-01-17 15:58:01.842 +0000 UTC 0 -2022-01-17 15:58:02.842 +0000 UTC 1 -2022-01-17 15:58:03.842 +0000 UTC 2 -2022-01-17 15:58:04.842 +0000 UTC 3 -2022-01-18 01:43:48.029 +0000 UTC 0 -2022-01-18 01:43:49.029 +0000 UTC 1 -2022-01-18 01:43:50.029 +0000 UTC 2 -2022-01-18 01:43:51.029 +0000 UTC 3 +2022-08-19 07:43:51.68 +0000 UTC 0 +2022-08-19 07:43:52.68 +0000 UTC 1 +2022-08-19 07:43:53.68 +0000 UTC 2 +2022-08-19 07:43:54.68 +0000 UTC 3 ``` ## 用 docker-compose 启动 TDengine 集群 -1. 如下 docker-compose 文件启动一个 2 副本、2 管理节点、2 数据节点以及 1 个 arbitrator 的 TDengine 集群。 - - ```docker - version: "3" - services: - arbitrator: - image: tdengine/tdengine:$VERSION - command: tarbitrator - td-1: - image: tdengine/tdengine:$VERSION - environment: - TAOS_FQDN: "td-1" - TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 - volumes: - - taosdata-td1:/var/lib/taos/ - - taoslog-td1:/var/log/taos/ - td-2: - image: tdengine/tdengine:$VERSION - environment: - TAOS_FQDN: "td-2" - TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 - volumes: - - taosdata-td2:/var/lib/taos/ - - taoslog-td2:/var/log/taos/ - volumes: - taosdata-td1: - taoslog-td1: - taosdata-td2: - taoslog-td2: - ``` +1. 如下 docker-compose 文件启动一个 三节点 TDengine 集群。 + +```yml +version: "3" +services: + td-1: + image: tdengine/tdengine:$VERSION + environment: + TAOS_FQDN: "td-1" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td1:/var/lib/taos/ + - taoslog-td1:/var/log/taos/ + td-2: + image: tdengine/tdengine:$VERSION + environment: + TAOS_FQDN: "td-2" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td2:/var/lib/taos/ + - taoslog-td2:/var/log/taos/ + td-3: + image: tdengine/tdengine:$VERSION + environment: + TAOS_FQDN: "td-3" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td3:/var/lib/taos/ + - taoslog-td3:/var/log/taos/ +volumes: + taosdata-td1: + taoslog-td1: + taosdata-td2: + taoslog-td2: + taosdata-td3: + taoslog-td3: +``` :::note -- `VERSION` 环境变量被用来设置 tdengine image tag -- 在新创建的实例上必须设置 `TAOS_FIRST_EP` 以使其能够加入 TDengine 集群;如果有高可用需求,则需要同时使用 `TAOS_SECOND_EP` -- `TAOS_REPLICA` 用来设置缺省的数据库副本数量,其取值范围为[1,3] - 在双副本环境下,推荐使用 arbitrator, 用 TAOS_ARBITRATOR 来设置 - ::: +* `VERSION` 环境变量被用来设置 tdengine image tag +* 在新创建的实例上必须设置 `TAOS_FIRST_EP` 以使其能够加入 TDengine 集群;如果有高可用需求,则需要同时使用 `TAOS_SECOND_EP` +::: 2. 启动集群 - ```shell - $ VERSION=2.4.0.0 docker-compose up -d - Creating network "test_default" with the default driver - Creating volume "test_taosdata-td1" with default driver - Creating volume "test_taoslog-td1" with default driver - Creating volume "test_taosdata-td2" with default driver - Creating volume "test_taoslog-td2" with default driver - Creating test_td-1_1 ... done - Creating test_arbitrator_1 ... done - Creating test_td-2_1 ... done - ``` +```shell +$ VERSION=3.0.0.0 docker-compose up -d +Creating network "test-docker_default" with the default driver +Creating volume "test-docker_taosdata-td1" with default driver +Creating volume "test-docker_taoslog-td1" with default driver +Creating volume "test-docker_taosdata-td2" with default driver +Creating volume "test-docker_taoslog-td2" with default driver +Creating volume "test-docker_taosdata-td3" with default driver +Creating volume "test-docker_taoslog-td3" with default driver + +Creating test-docker_td-3_1 ... done +Creating test-docker_td-1_1 ... done +Creating test-docker_td-2_1 ... done +``` 3. 查看节点状态 - ```shell - $ docker-compose ps - Name Command State Ports - --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - test_arbitrator_1 /usr/bin/entrypoint.sh tar ... Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp - test_td-1_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp - test_td-2_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp - ``` +```shell + docker-compose ps + Name Command State Ports -4. 用 taos shell 查看 dnodes +------------------------------------------------------------------- +test-docker_td-1_1 /tini -- /usr/bin/entrypoi ... Up +test-docker_td-2_1 /tini -- /usr/bin/entrypoi ... Up +test-docker_td-3_1 /tini -- /usr/bin/entrypoi ... Up +``` - ```shell - $ docker-compose exec td-1 taos -s "show dnodes" - - taos> show dnodes - id | end_point | vnodes | cores | status | role | create_time | offline reason | - ====================================================================================================================================== - 1 | td-1:6030 | 1 | 8 | ready | any | 2022-01-18 02:47:42.871 | | - 2 | td-2:6030 | 0 | 8 | ready | any | 2022-01-18 02:47:43.518 | | - 0 | arbitrator:6042 | 0 | 0 | ready | arb | 2022-01-18 02:47:43.633 | - | - Query OK, 3 row(s) in set (0.000811s) - ``` +4. 用 TDengine CLI 查看 dnodes + +```shell + +$ docker-compose exec td-1 taos -s "show dnodes" + +taos> show dnodes + + id | endpoint | vnodes | support_vnodes | status | create_time | note | +================================================================================================================================================= + + 1 | td-1:6030 | 0 | 32 | ready | 2022-08-19 07:57:29.971 | | + 2 | td-2:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.415 | | + 3 | td-3:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.417 | | +Query OK, 3 rows in database (0.021262s) + +``` ## taosAdapter @@ -362,93 +378,80 @@ password: taosdata 2. 同时为了部署灵活起见,可以在独立的容器中启动 taosAdapter - ```docker - services: - # ... - adapter: - image: tdengine/tdengine:$VERSION - command: taosadapter - ``` +```docker +services: + # ... + adapter: + image: tdengine/tdengine:$VERSION + command: taosadapter +``` - 如果要部署多个 taosAdapter 来提高吞吐量并提供高可用性,推荐配置方式为使用 nginx 等反向代理来提供统一的访问入口。具体配置方法请参考 nginx 的官方文档。如下是示例: - - ```docker - version: "3" - - networks: - inter: - api: - - services: - arbitrator: - image: tdengine/tdengine:$VERSION - command: tarbitrator - networks: - - inter - td-1: - image: tdengine/tdengine:$VERSION - networks: - - inter - environment: - TAOS_FQDN: "td-1" - TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 - volumes: - - taosdata-td1:/var/lib/taos/ - - taoslog-td1:/var/log/taos/ - td-2: - image: tdengine/tdengine:$VERSION - networks: - - inter - environment: - TAOS_FQDN: "td-2" - TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 - volumes: - - taosdata-td2:/var/lib/taos/ - - taoslog-td2:/var/log/taos/ - adapter: - image: tdengine/tdengine:$VERSION - command: taosadapter - networks: - - inter - environment: - TAOS_FIRST_EP: "td-1" - TAOS_SECOND_EP: "td-2" - deploy: - replicas: 4 - nginx: - image: nginx - depends_on: - - adapter - networks: - - inter - - api - ports: - - 6041:6041 - - 6044:6044/udp - command: [ - "sh", - "-c", - "while true; - do curl -s http://adapter:6041/-/ping >/dev/null && break; - done; - printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}' - > /etc/nginx/conf.d/rest.conf; - printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}' - >> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf; - nginx -g 'daemon off;'", - ] - volumes: - taosdata-td1: - taoslog-td1: - taosdata-td2: - taoslog-td2: - ``` +如果要部署多个 taosAdapter 来提高吞吐量并提供高可用性,推荐配置方式为使用 nginx 等反向代理来提供统一的访问入口。具体配置方法请参考 nginx 的官方文档。如下是示例: + +```yml +version: "3" + +networks: + inter: + +services: + td-1: + image: tdengine/tdengine:$VERSION + networks: + - inter + environment: + TAOS_FQDN: "td-1" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td1:/var/lib/taos/ + - taoslog-td1:/var/log/taos/ + td-2: + image: tdengine/tdengine:$VERSION + networks: + - inter + environment: + TAOS_FQDN: "td-2" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td2:/var/lib/taos/ + - taoslog-td2:/var/log/taos/ + adapter: + image: tdengine/tdengine:$VERSION + entrypoint: "taosadapter" + networks: + - inter + environment: + TAOS_FIRST_EP: "td-1" + TAOS_SECOND_EP: "td-2" + deploy: + replicas: 4 + nginx: + image: nginx + depends_on: + - adapter + networks: + - inter + ports: + - 6041:6041 + - 6044:6044/udp + command: [ + "sh", + "-c", + "while true; + do curl -s http://adapter:6041/-/ping >/dev/null && break; + done; + printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}' + > /etc/nginx/conf.d/rest.conf; + printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}' + >> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf; + nginx -g 'daemon off;'", + ] +volumes: + taosdata-td1: + taoslog-td1: + taosdata-td2: + taoslog-td2: +``` ## 使用 docker swarm 部署 @@ -457,50 +460,46 @@ password: taosdata docker-compose 文件可以参考上节。下面是使用 docker swarm 启动 TDengine 的命令: ```shell -$ VERSION=2.4.0 docker stack deploy -c docker-compose.yml taos +$ VERSION=3.0.0.0 docker stack deploy -c docker-compose.yml taos Creating network taos_inter -Creating network taos_api -Creating service taos_arbitrator +Creating service taos_nginx Creating service taos_td-1 Creating service taos_td-2 Creating service taos_adapter -Creating service taos_nginx ``` 查看和管理 ```shell $ docker stack ps taos -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -79ni8temw59n taos_nginx.1 nginx:latest TM1701 Running Running about a minute ago -3e94u72msiyg taos_adapter.1 tdengine/tdengine:2.4.0 TM1702 Running Running 56 seconds ago -100amjkwzsc6 taos_td-2.1 tdengine/tdengine:2.4.0 TM1703 Running Running about a minute ago -pkjehr2vvaaa taos_td-1.1 tdengine/tdengine:2.4.0 TM1704 Running Running 2 minutes ago -tpzvgpsr1qkt taos_arbitrator.1 tdengine/tdengine:2.4.0 TM1705 Running Running 2 minutes ago -rvss3g5yg6fa taos_adapter.2 tdengine/tdengine:2.4.0 TM1706 Running Running 56 seconds ago -i2augxamfllf taos_adapter.3 tdengine/tdengine:2.4.0 TM1707 Running Running 56 seconds ago -lmjyhzccpvpg taos_adapter.4 tdengine/tdengine:2.4.0 TM1708 Running Running 56 seconds ago +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +7m3sbf532bqp taos_adapter.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago +pj403n6ofmmh taos_adapter.2 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago +rxqfwsyk5q1h taos_adapter.3 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago +qj40lpxr40oc taos_adapter.4 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago +oe3455ulxpze taos_nginx.1 nginx:latest vm98 Running Running about a minute ago +o0tsg70nrrc6 taos_td-1.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago +q5m1oxs589cp taos_td-2.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago $ docker service ls -ID NAME MODE REPLICAS IMAGE PORTS -561t4lu6nfw6 taos_adapter replicated 4/4 tdengine/tdengine:2.4.0 -3hk5ct3q90sm taos_arbitrator replicated 1/1 tdengine/tdengine:2.4.0 -d8qr52envqzu taos_nginx replicated 1/1 nginx:latest *:6041->6041/tcp, *:6044->6044/udp -2isssfvjk747 taos_td-1 replicated 1/1 tdengine/tdengine:2.4.0 -9pzw7u02ichv taos_td-2 replicated 1/1 tdengine/tdengine:2.4.0 +ID NAME MODE REPLICAS IMAGE PORTS +ozuklorgl8bs taos_adapter replicated 4/4 tdengine/tdengine:3.0.0.0 +crmhdjw6vxw0 taos_nginx replicated 1/1 nginx:latest *:6041->6041/tcp, *:6044->6044/udp +o86ngy7csv5n taos_td-1 replicated 1/1 tdengine/tdengine:3.0.0.0 +rma040ny4tb0 taos_td-2 replicated 1/1 tdengine/tdengine:3.0.0.0 ``` -从上面的输出可以看到有两个 dnode, 和两个 taosAdapter,以及一个 nginx 反向代理服务。 +从上面的输出可以看到有两个 dnode, 和四个 taosAdapter,以及一个 nginx 反向代理服务。 接下来,我们可以减少 taosAdapter 服务的数量 ```shell $ docker service scale taos_adapter=1 taos_adapter scaled to 1 -overall progress: 1 out of 1 tasks -1/1: running [==================================================>] +overall progress: 1 out of 1 tasks +1/1: running [==================================================>] verify: Service converged $ docker service ls -f name=taos_adapter -ID NAME MODE REPLICAS IMAGE PORTS -561t4lu6nfw6 taos_adapter replicated 1/1 tdengine/tdengine:2.4.0 +ID NAME MODE REPLICAS IMAGE PORTS +ozuklorgl8bs taos_adapter replicated 1/1 tdengine/tdengine:3.0.0.0 ``` diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md index 845693a98e00b2342c3fd749b935b7a36f9c3fbf..7b31e10572c4a6bafd088e7b7c14853ee0d32df1 100644 --- a/docs/zh/14-reference/12-config/index.md +++ b/docs/zh/14-reference/12-config/index.md @@ -647,3 +647,174 @@ charset 的有效值是 UTF-8。 | 含义 | 是否启动 udf 服务 | | 取值范围 | 0: 不启动;1:启动 | | 缺省值 | 1 | + +## 2.X 与 3.0 配置参数对比 +| # | **参数** | **适用于 2.X 版本** | **适用于 3.0 版本** | +| --- | :-----------------: | --------------- | --------------- | +| 1 | firstEp | 是 | 是 | +| 2 | secondEp | 是 | 是 | +| 3 | fqdn | 是 | 是 | +| 4 | serverPort | 是 | 是 | +| 5 | maxShellConns | 是 | 是 | +| 6 | monitor | 是 | 是 | +| 7 | monitorFqdn | 否 | 是 | +| 8 | monitorPort | 否 | 是 | +| 9 | monitorInterval | 是 | 是 | +| 10 | monitorMaxLogs | 否 | 是 | +| 11 | monitorComp | 否 | 是 | +| 12 | telemetryReporting | 是 | 是 | +| 13 | telemetryInterval | 否 | 是 | +| 14 | telemetryServer | 否 | 是 | +| 15 | telemetryPort | 否 | 是 | +| 16 | queryPolicy | 否 | 是 | +| 17 | querySmaOptimize | 否 | 是 | +| 18 | queryBufferSize | 是 | 是 | +| 19 | maxNumOfDistinctRes | 是 | 是 | +| 20 | minSlidingTime | 是 | 是 | +| 21 | minIntervalTime | 是 | 是 | +| 22 | countAlwaysReturnValue | 是 | 是 | +| 23 | dataDir | 是 | 是 | +| 24 | minimalDataDirGB | 是 | 是 | +| 25 | supportVnodes | 否 | 是 | +| 26 | tempDir | 是 | 是 | +| 27 | minimalTmpDirGB | 是 | 是 | +| 28 | compressMsgSize | 是 | 是 | +| 29 | compressColData | 是 | 是 | +| 30 | smlChildTableName | 是 | 是 | +| 31 | smlTagName | 是 | 是 | +| 32 | smlDataFormat | 否 | 是 | +| 33 | statusInterval | 是 | 是 | +| 34 | shellActivityTimer | 是 | 是 | +| 35 | transPullupInterval | 否 | 是 | +| 36 | mqRebalanceInterval | 否 | 是 | +| 37 | ttlUnit | 否 | 是 | +| 38 | ttlPushInterval | 否 | 是 | +| 39 | numOfTaskQueueThreads | 否 | 是 | +| 40 | numOfRpcThreads | 否 | 是 | +| 41 | numOfCommitThreads | 是 | 是 | +| 42 | numOfMnodeReadThreads | 否 | 是 | +| 43 | numOfVnodeQueryThreads | 否 | 是 | +| 44 | numOfVnodeStreamThreads | 否 | 是 | +| 45 | numOfVnodeFetchThreads | 否 | 是 | +| 46 | numOfVnodeWriteThreads | 否 | 是 | +| 47 | numOfVnodeSyncThreads | 否 | 是 | +| 48 | numOfVnodeRsmaThreads | 否 | 是 | +| 49 | numOfQnodeQueryThreads | 否 | 是 | +| 50 | numOfQnodeFetchThreads | 否 | 是 | +| 51 | numOfSnodeSharedThreads | 否 | 是 | +| 52 | numOfSnodeUniqueThreads | 否 | 是 | +| 53 | rpcQueueMemoryAllowed | 否 | 是 | +| 54 | logDir | 是 | 是 | +| 55 | minimalLogDirGB | 是 | 是 | +| 56 | numOfLogLines | 是 | 是 | +| 57 | asyncLog | 是 | 是 | +| 58 | logKeepDays | 是 | 是 | +| 59 | debugFlag | 是 | 是 | +| 60 | tmrDebugFlag | 是 | 是 | +| 61 | uDebugFlag | 是 | 是 | +| 62 | rpcDebugFlag | 是 | 是 | +| 63 | jniDebugFlag | 是 | 是 | +| 64 | qDebugFlag | 是 | 是 | +| 65 | cDebugFlag | 是 | 是 | +| 66 | dDebugFlag | 是 | 是 | +| 67 | vDebugFlag | 是 | 是 | +| 68 | mDebugFlag | 是 | 是 | +| 69 | wDebugFlag | 是 | 是 | +| 70 | sDebugFlag | 是 | 是 | +| 71 | tsdbDebugFlag | 是 | 是 | +| 72 | tqDebugFlag | 否 | 是 | +| 73 | fsDebugFlag | 是 | 是 | +| 74 | udfDebugFlag | 否 | 是 | +| 75 | smaDebugFlag | 否 | 是 | +| 76 | idxDebugFlag | 否 | 是 | +| 77 | tdbDebugFlag | 否 | 是 | +| 78 | metaDebugFlag | 否 | 是 | +| 79 | timezone | 是 | 是 | +| 80 | locale | 是 | 是 | +| 81 | charset | 是 | 是 | +| 82 | udf | 是 | 是 | +| 83 | enableCoreFile | 是 | 是 | +| 84 | arbitrator | 是 | 否 | +| 85 | numOfThreadsPerCore | 是 | 否 | +| 86 | numOfMnodes | 是 | 否 | +| 87 | vnodeBak | 是 | 否 | +| 88 | balance | 是 | 否 | +| 89 | balanceInterval | 是 | 否 | +| 90 | offlineThreshold | 是 | 否 | +| 91 | role | 是 | 否 | +| 92 | dnodeNopLoop | 是 | 否 | +| 93 | keepTimeOffset | 是 | 否 | +| 94 | rpcTimer | 是 | 否 | +| 95 | rpcMaxTime | 是 | 否 | +| 96 | rpcForceTcp | 是 | 否 | +| 97 | tcpConnTimeout | 是 | 否 | +| 98 | syncCheckInterval | 是 | 否 | +| 99 | maxTmrCtrl | 是 | 否 | +| 100 | monitorReplica | 是 | 否 | +| 101 | smlTagNullName | 是 | 否 | +| 102 | keepColumnName | 是 | 否 | +| 103 | ratioOfQueryCores | 是 | 否 | +| 104 | maxStreamCompDelay | 是 | 否 | +| 105 | maxFirstStreamCompDelay | 是 | 否 | +| 106 | retryStreamCompDelay | 是 | 否 | +| 107 | streamCompDelayRatio | 是 | 否 | +| 108 | maxVgroupsPerDb | 是 | 否 | +| 109 | maxTablesPerVnode | 是 | 否 | +| 110 | minTablesPerVnode | 是 | 否 | +| 111 | tableIncStepPerVnode | 是 | 否 | +| 112 | cache | 是 | 否 | +| 113 | blocks | 是 | 否 | +| 114 | days | 是 | 否 | +| 115 | keep | 是 | 否 | +| 116 | minRows | 是 | 否 | +| 117 | maxRows | 是 | 否 | +| 118 | quorum | 是 | 否 | +| 119 | comp | 是 | 否 | +| 120 | walLevel | 是 | 否 | +| 121 | fsync | 是 | 否 | +| 122 | replica | 是 | 否 | +| 123 | partitions | 是 | 否 | +| 124 | quorum | 是 | 否 | +| 125 | update | 是 | 否 | +| 126 | cachelast | 是 | 否 | +| 127 | maxSQLLength | 是 | 否 | +| 128 | maxWildCardsLength | 是 | 否 | +| 129 | maxRegexStringLen | 是 | 否 | +| 130 | maxNumOfOrderedRes | 是 | 否 | +| 131 | maxConnections | 是 | 否 | +| 132 | mnodeEqualVnodeNum | 是 | 否 | +| 133 | http | 是 | 否 | +| 134 | httpEnableRecordSql | 是 | 否 | +| 135 | httpMaxThreads | 是 | 否 | +| 136 | restfulRowLimit | 是 | 否 | +| 137 | httpDbNameMandatory | 是 | 否 | +| 138 | httpKeepAlive | 是 | 否 | +| 139 | enableRecordSql | 是 | 否 | +| 140 | maxBinaryDisplayWidth | 是 | 否 | +| 141 | stream | 是 | 否 | +| 142 | retrieveBlockingModel | 是 | 否 | +| 143 | tsdbMetaCompactRatio | 是 | 否 | +| 144 | defaultJSONStrType | 是 | 否 | +| 145 | walFlushSize | 是 | 否 | +| 146 | keepTimeOffset | 是 | 否 | +| 147 | flowctrl | 是 | 否 | +| 148 | slaveQuery | 是 | 否 | +| 149 | adjustMaster | 是 | 否 | +| 150 | topicBinaryLen | 是 | 否 | +| 151 | telegrafUseFieldNum | 是 | 否 | +| 152 | deadLockKillQuery | 是 | 否 | +| 153 | clientMerge | 是 | 否 | +| 154 | sdbDebugFlag | 是 | 否 | +| 155 | odbcDebugFlag | 是 | 否 | +| 156 | httpDebugFlag | 是 | 否 | +| 157 | monDebugFlag | 是 | 否 | +| 158 | cqDebugFlag | 是 | 否 | +| 159 | shortcutFlag | 是 | 否 | +| 160 | probeSeconds | 是 | 否 | +| 161 | probeKillSeconds | 是 | 否 | +| 162 | probeInterval | 是 | 否 | +| 163 | lossyColumns | 是 | 否 | +| 164 | fPrecision | 是 | 否 | +| 165 | dPrecision | 是 | 否 | +| 166 | maxRange | 是 | 否 | +| 167 | range | 是 | 否 | diff --git a/docs/zh/14-reference/12-directory.md b/docs/zh/14-reference/12-directory.md index 262eb99fa5cc012d22b917479bc3d16442d06ddf..04aa6e72c9b2c0a04e35ef1f67f1138cf7d00ce2 100644 --- a/docs/zh/14-reference/12-directory.md +++ b/docs/zh/14-reference/12-directory.md @@ -30,7 +30,7 @@ TDengine 的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下 - _taosd-dump-cfg.gdb_:用于方便调试 taosd 的 gdb 执行脚本。 :::note -2.4.0.0 版本之后的 taosBenchmark 和 taosdump 需要安装独立安装包 taosTools。 +taosdump 需要安装独立安装包 taosTools。 ::: diff --git a/docs/zh/14-reference/13-schemaless/13-schemaless.md b/docs/zh/14-reference/13-schemaless/13-schemaless.md index ae4280e26a64e2d10534a0faaf70ca0704cf58a6..a33abafaf82746afbf5669c6ea564b5a87060bb8 100644 --- a/docs/zh/14-reference/13-schemaless/13-schemaless.md +++ b/docs/zh/14-reference/13-schemaless/13-schemaless.md @@ -3,7 +3,7 @@ title: Schemaless 写入 description: 'Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构' --- -在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless +在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless 将自动增加必要的数据列,保证用户写入的数据可以被正确存储。 无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,你也可以通过,SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。 @@ -36,14 +36,14 @@ tag_set 中的所有的数据自动转化为 nchar 数据类型,并不需要 - 对空格、等号(=)、逗号(,)、双引号("),前面需要使用反斜杠(\)进行转义。(都指的是英文半角符号) - 数值类型将通过后缀来区分数据类型: -| **序号** | **后缀** | **映射类型** | **大小(字节)** | -| -------- | -------- | ------------ | -------------- | -| 1 | 无或 f64 | double | 8 | -| 2 | f32 | float | 4 | -| 3 | i8/u8 | TinyInt/UTinyInt | 1 | -| 4 | i16/u16 | SmallInt/USmallInt | 2 | -| 5 | i32/u32 | Int/UInt | 4 | -| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 | +| **序号** | **后缀** | **映射类型** | **大小(字节)** | +| -------- | ----------- | ----------------------------- | -------------- | +| 1 | 无或 f64 | double | 8 | +| 2 | f32 | float | 4 | +| 3 | i8/u8 | TinyInt/UTinyInt | 1 | +| 4 | i16/u16 | SmallInt/USmallInt | 2 | +| 5 | i32/u32 | Int/UInt | 4 | +| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 | - t, T, true, True, TRUE, f, F, false, False 将直接作为 BOOL 型来处理。 @@ -69,7 +69,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 需要注意的是,这里的 tag_key1, tag_key2 并不是用户输入的标签的原始顺序,而是使用了标签名称按照字符串升序排列后的结果。所以,tag_key1 并不是在行协议中输入的第一个标签。 排列完成以后计算该字符串的 MD5 散列值 "md5_val"。然后将计算的结果与字符串组合生成表名:“t_md5_val”。其中的 “t_” 是固定的前缀,每个通过该映射关系自动生成的表都具有该前缀。 -为了让用户可以指定生成的表名,可以通过配置smlChildTableName来指定(比如 配置smlChildTableName=tname 插入数据为st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为cpu1,注意如果多行数据tname相同,但是后面的tag_set不同,则使用第一次自动建表时指定的tag_set,其他的会忽略)。 +为了让用户可以指定生成的表名,可以通过配置 smlChildTableName 来指定(比如 配置 smlChildTableName=tname 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一次自动建表时指定的 tag_set,其他的会忽略)。 2. 如果解析行协议获得的超级表不存在,则会创建这个超级表(不建议手动创建超级表,不然插入数据可能异常)。 3. 如果解析行协议获得子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。 @@ -78,11 +78,11 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 NULL。 6. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。 7. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。 -8. 为了提高写入的效率,默认假设同一个超级表中field_set的顺序是一样的(第一条数据包含所有的field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数smlDataFormat为false,否则,数据写入按照相同顺序写入,库中数据会异常。 +8. 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常。 :::tip 无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 -16KB。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit) +16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit) ::: diff --git a/docs/zh/14-reference/14-taosKeeper.md b/docs/zh/14-reference/14-taosKeeper.md new file mode 100644 index 0000000000000000000000000000000000000000..ae0a496f03e8e545525fce49ae2394a10696c09c --- /dev/null +++ b/docs/zh/14-reference/14-taosKeeper.md @@ -0,0 +1,144 @@ +--- +sidebar_label: taosKeeper +title: taosKeeper +description: TDengine 3.0 版本监控指标的导出工具 +--- + +## 简介 + +TaosKeeper 是 TDengine 3.0 版本监控指标的导出工具,通过简单的几项配置即可获取 TDengine 的运行状态。taosKeeper 使用 TDengine RESTful 接口,所以不需要安装 TDengine 客户端即可使用。 + +## 安装 + + +taosKeeper 安装方式: + + + + +- 单独编译 taosKeeper 并安装,详情请参考 [taosKeeper](https://github.com/taosdata/taoskeeper) 仓库。 + +## 运行 + +### 配置和运行方式 + +taosKeeper 需要在操作系统终端执行,该工具支持三种配置方式:[命令行参数](#命令行参数启动)、[环境变量](#环境变量启动) 和 [配置文件](#配置文件启动)。优先级为:命令行参数、环境变量、配置文件参数。 + +**在运行 taosKeeper 之前要确保 TDengine 集群与 taosAdapter 已经在正确运行。** 并且 TDengine 已经开启监控服务,具体请参考:[TDengine 监控配置](../config/#监控相关)。 + +### 命令行参数启动 + +在使用命令行参数运行 taosKeeper 并控制其行为。 + +```shell +$ taosKeeper +``` + +### 环境变量启动 + +通过设置环境变量达到控制启动参数的目的,通常在容器中运行时使用。 + +```shell +$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3 + +$ taoskeeper +``` + +具体参数列表请参照 `taoskeeper -h` 输入结果。 + +### 配置文件启动 + +执行以下命令即可快速体验 taosKeeper。当不指定 taosKeeper 配置文件时,优先使用 `/etc/taos/keeper.toml` 配置,否则将使用默认配置。 + +```shell +$ taoskeeper -c +``` + +**下面是配置文件的示例:** +```toml +# gin 框架是否启用 debug +debug = false + +# 服务监听端口, 默认为 6043 +port = 6043 + +# 日志级别,包含 panic、error、info、debug、trace等 +loglevel = "info" + +# 程序中使用协程池的大小 +gopoolsize = 50000 + +# 查询 TDengine 监控数据轮询间隔 +RotationInterval = "15s" + +[tdengine] +host = "127.0.0.1" +port = 6041 +username = "root" +password = "taosdata" + +# 需要被监控的 taosAdapter +[taosAdapter] +address = ["127.0.0.1:6041","192.168.1.95:6041"] + +[metrics] +# 监控指标前缀 +prefix = "taos" + +# 集群数据的标识符 +cluster = "production" + +# 存放监控数据的数据库 +database = "log" + +# 指定需要监控的普通表 +tables = ["normal_table"] +``` + +### 获取监控指标 + +taosKeeper 作为 TDengine 监控指标的导出工具,可以将 TDengine 产生的监控数据记录在指定数据库中,并提供导出接口。 + +#### 查看监控结果集 + +```shell +$ taos +# 如上示例,使用 log 库作为监控日志存储位置 +> use log; +> select * from cluster_info limit 1; +``` + +结果示例: + +```shell + ts | first_ep | first_ep_dnode_id | version | master_uptime | monitor_interval | dbs_total | tbs_total | stbs_total | dnodes_total | dnodes_alive | mnodes_total | mnodes_alive | vgroups_total | vgroups_alive | vnodes_total | vnodes_alive | connections_total | protocol | cluster_id | +=============================================================================================================================================================================================================================================================================================================================================================================== + 2022-08-16 17:37:01.629 | hlb:6030 | 1 | 3.0.0.0 | 0.27250 | 15 | 2 | 27 | 38 | 1 | 1 | 1 | 1 | 4 | 4 | 4 | 4 | 14 | 1 | 5981392874047724755 | +Query OK, 1 rows in database (0.036162s) +``` + +#### 导出监控指标 + +```shell +$ curl http://127.0.0.1:6043/metrics +``` + +部分结果集: + +```shell +# HELP taos_cluster_info_connections_total +# TYPE taos_cluster_info_connections_total counter +taos_cluster_info_connections_total{cluster_id="5981392874047724755"} 16 +# HELP taos_cluster_info_dbs_total +# TYPE taos_cluster_info_dbs_total counter +taos_cluster_info_dbs_total{cluster_id="5981392874047724755"} 2 +# HELP taos_cluster_info_dnodes_alive +# TYPE taos_cluster_info_dnodes_alive counter +taos_cluster_info_dnodes_alive{cluster_id="5981392874047724755"} 1 +# HELP taos_cluster_info_dnodes_total +# TYPE taos_cluster_info_dnodes_total counter +taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1 +# HELP taos_cluster_info_first_ep +# TYPE taos_cluster_info_first_ep gauge +taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1 +``` \ No newline at end of file diff --git a/docs/zh/14-reference/14-taosx.md b/docs/zh/14-reference/14-taosx.md deleted file mode 100644 index ed3f8d488fe6f809e855510df913f69cc79ee6a0..0000000000000000000000000000000000000000 --- a/docs/zh/14-reference/14-taosx.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -sidebar_label: taosX -title: 使用 taosX 在集群间复制数据 ---- \ No newline at end of file diff --git a/docs/zh/14-reference/index.md b/docs/zh/14-reference/index.md index e9c0c4fe236b8eefec1275a447c1dd1188921ee0..9d0a44af577beba67c445dac1cfcac0475e0ce3f 100644 --- a/docs/zh/14-reference/index.md +++ b/docs/zh/14-reference/index.md @@ -1,5 +1,6 @@ --- title: 参考手册 +description: TDengine 中的各种组件的详细说明 --- 参考手册是对 TDengine 本身、 TDengine 各语言连接器及自带的工具最详细的介绍。 diff --git a/docs/zh/17-operation/01-pkg-install.md b/docs/zh/17-operation/01-pkg-install.md index 5e4cc931309ea8bf45b1840a7da04e336434bdab..6d93c1697b1e0936b3f6539d3b1fb95db0baa956 100644 --- a/docs/zh/17-operation/01-pkg-install.md +++ b/docs/zh/17-operation/01-pkg-install.md @@ -47,43 +47,99 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/ -内容 TBD +TDengine 卸载命令如下: + +``` +$ sudo apt-get remove tdengine +Reading package lists... Done +Building dependency tree +Reading state information... Done +The following packages will be REMOVED: + tdengine +0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded. +After this operation, 68.3 MB disk space will be freed. +Do you want to continue? [Y/n] y +(Reading database ... 135625 files and directories currently installed.) +Removing tdengine (3.0.0.0) ... +TDengine is removed successfully! + +``` + +taosTools 卸载命令如下: + +``` +$ sudo apt remove taostools +Reading package lists... Done +Building dependency tree +Reading state information... Done +The following packages will be REMOVED: + taostools +0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded. +After this operation, 68.3 MB disk space will be freed. +Do you want to continue? [Y/n] +(Reading database ... 147973 files and directories currently installed.) +Removing taostools (2.1.2) ... +``` -卸载命令如下: +TDengine 卸载命令如下: ``` $ sudo dpkg -r tdengine (Reading database ... 120119 files and directories currently installed.) -Removing tdengine (3.0.0.10002) ... +Removing tdengine (3.0.0.0) ... TDengine is removed successfully! ``` +taosTools 卸载命令如下: + +``` +$ sudo dpkg -r taostools +(Reading database ... 147973 files and directories currently installed.) +Removing taostools (2.1.2) ... +``` + -卸载命令如下: +卸载 TDengine 命令如下: ``` $ sudo rpm -e tdengine TDengine is removed successfully! ``` +卸载 taosTools 命令如下: + +``` +sudo rpm -e taostools +taosToole is removed successfully! +``` + -卸载命令如下: +卸载 TDengine 命令如下: ``` $ rmtaos TDengine is removed successfully! ``` +卸载 taosTools 命令如下: + +``` +$ rmtaostools +Start to uninstall taos tools ... + +taos tools is uninstalled successfully! +``` + 在 C:\TDengine 目录下,通过运行 unins000.exe 卸载程序来卸载 TDengine。 diff --git a/docs/zh/17-operation/02-planning.mdx b/docs/zh/17-operation/02-planning.mdx index 0d63c4eaf365036cbba1d838ba6ee860a894724d..28e3f54020632e84721c20a9f63ee2a6117e6a03 100644 --- a/docs/zh/17-operation/02-planning.mdx +++ b/docs/zh/17-operation/02-planning.mdx @@ -1,6 +1,7 @@ --- sidebar_label: 容量规划 title: 容量规划 +description: 如何规划一个 TDengine 集群所需的物理资源 --- 使用 TDengine 来搭建一个物联网大数据平台,计算资源、存储资源需要根据业务场景进行规划。下面分别讨论系统运行所需要的内存、CPU 以及硬盘空间。 diff --git a/docs/zh/17-operation/03-tolerance.md b/docs/zh/17-operation/03-tolerance.md index 2cfd4b6484acdcb617cd91ed694d2f4c0f010e93..79cf10c39a7028e04e7c1ebbea54738dcdc528af 100644 --- a/docs/zh/17-operation/03-tolerance.md +++ b/docs/zh/17-operation/03-tolerance.md @@ -1,5 +1,7 @@ --- title: 容错和灾备 +sidebar_label: 容错和灾备 +description: TDengine 的容错和灾备功能 --- ## 容错 @@ -26,5 +28,3 @@ TDengine 集群中的时序数据的副本数是与数据库关联的,一个 TDengine 集群的节点数必须大于等于副本数,否则创建表时将报错。 当 TDengine 集群中的节点部署在不同的物理机上,并设置多个副本数时,就实现了系统的高可靠性,无需再使用其他软件或工具。TDengine 企业版还可以将副本部署在不同机房,从而实现异地容灾。 - -另外一种灾备方式是通过 `taosX` 将一个 TDengine 集群的数据同步复制到物理上位于不同数据中心的另一个 TDengine 集群。其详细使用方法请参考 [taosX 参考手册](../../reference/taosX) diff --git a/docs/zh/17-operation/07-import.md b/docs/zh/17-operation/07-import.md index 7dee05720d4c3446181e8e0d81a5c27e35300ba8..17945be595f9176a528e52d2344b5cd0545c3426 100644 --- a/docs/zh/17-operation/07-import.md +++ b/docs/zh/17-operation/07-import.md @@ -1,5 +1,6 @@ --- title: 数据导入 +description: 如何导入外部数据到 TDengine --- TDengine 提供多种方便的数据导入功能,一种按脚本文件导入,一种按数据文件导入,一种是 taosdump 工具导入本身导出的文件。 diff --git a/docs/zh/17-operation/08-export.md b/docs/zh/17-operation/08-export.md index 042ecc7ba29f976d50bbca1e3155bd03b2ae7ccc..44247e28bdf5ec48ccd05ab6f7e4d3558cf23103 100644 --- a/docs/zh/17-operation/08-export.md +++ b/docs/zh/17-operation/08-export.md @@ -1,12 +1,13 @@ --- title: 数据导出 +description: 如何导出 TDengine 中的数据 --- 为方便数据导出,TDengine 提供了两种导出方式,分别是按表导出和用 taosdump 导出。 ## 按表导出 CSV 文件 -如果用户需要导出一个表或一个 STable 中的数据,可在 taos shell 中运行: +如果用户需要导出一个表或一个 STable 中的数据,可在 TDengine CLI 中运行: ```sql select * from >> data.csv; diff --git a/docs/zh/17-operation/10-monitor.md b/docs/zh/17-operation/10-monitor.md index e30be775fb5c337b2a621bea92d3af31a2cb5cc0..e936f35dcac544ad94035b5e5c9716c4aa50562e 100644 --- a/docs/zh/17-operation/10-monitor.md +++ b/docs/zh/17-operation/10-monitor.md @@ -1,14 +1,15 @@ --- title: 系统监控 +description: 监控 TDengine 的运行状态 --- -TDengine 启动后,会自动创建一个监测数据库 log,并自动将服务器的 CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库。TDengine 还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息记录下来存放在 log 库里。系统管理员可以从 CLI 直接查看这个数据库,也可以在 WEB 通过图形化界面查看这些监测信息。 +TDengine 通过 [taosKeeper](/reference/taosKeeper/) 将服务器的 CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度等信息定时写入指定数据库。TDengine 还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息进行记录。系统管理员可以从 CLI 直接查看这个数据库,也可以在 WEB 通过图形化界面查看这些监测信息。 这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项 monitor 将其关闭或打开。 ## TDinsight - 使用监控数据库 + Grafana 对 TDengine 进行监控的解决方案 -从 2.3.3.0 开始,监控数据库将提供更多的监控项,您可以从 [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) 了解如何使用 TDinsight 方案对 TDengine 进行监控。 +监控数据库将提供更多的监控项,您可以从 [TDinsight Grafana Dashboard](/reference/tdinsight/) 了解如何使用 TDinsight 方案对 TDengine 进行监控。 我们提供了一个自动化脚本 `TDinsight.sh` 对 TDinsight 进行部署。 @@ -34,21 +35,6 @@ chmod +x TDinsight.sh sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E ``` - - 使用 TDengine 数据源插件内置的阿里云短信告警通知,使用 `-s` 启用之,并设置如下参数: - - 1. 阿里云短信服务 Key ID,参数 `-I` - 2. 阿里云短信服务 Key Secret,参数 `K` - 3. 阿里云短信服务签名,参数 `-S` - 4. 短信通知模板号,参数 `-C` - 5. 短信通知模板输入参数,JSON 格式,参数 `-T`,如 `{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}` - 6. 逗号分隔的通知手机列表,参数 `-B` - - ```bash - sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -s \ - -I XXXXXXX -K XXXXXXXX -S taosdata -C SMS_1111111 -B 18900000000 \ - -T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}' - ``` - 运行程序并重启 Grafana 服务,打开面板:`http://localhost:3000/d/tdinsight`。 更多使用场景和限制请参考[TDinsight](/reference/tdinsight/) 文档。 diff --git a/docs/zh/17-operation/17-diagnose.md b/docs/zh/17-operation/17-diagnose.md index e6e9be7153dee855867c4ba4fcd1d3258c9d788f..ec529096a7513bd625131939d67c61279721b961 100644 --- a/docs/zh/17-operation/17-diagnose.md +++ b/docs/zh/17-operation/17-diagnose.md @@ -1,5 +1,6 @@ --- title: 诊断及其他 +description: 一些常见问题的诊断技巧 --- ## 网络连接诊断 diff --git a/docs/zh/20-third-party/01-grafana.mdx b/docs/zh/20-third-party/01-grafana.mdx index 93090ffd38c3ce66488826c486584dd305dbc20c..83f3f8bb25de4b99a345bafab7e8a43c3d35f14e 100644 --- a/docs/zh/20-third-party/01-grafana.mdx +++ b/docs/zh/20-third-party/01-grafana.mdx @@ -1,6 +1,7 @@ --- sidebar_label: Grafana title: Grafana +description: 使用 Grafana 与 TDengine 的详细说明 --- import Tabs from "@theme/Tabs"; @@ -193,7 +194,7 @@ docker run -d \ 如上图所示,在 Query 中选中 `TDengine` 数据源,在下方查询框可输入相应 SQL 进行查询,具体说明如下: -- INPUT SQL:输入要查询的语句(该 SQL 语句的结果集应为两列多行),例如:`select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)` ,其中,from、to 和 interval 为 TDengine 插件的内置变量,表示从 Grafana 插件面板获取的查询范围和时间间隔。除了内置变量外,`也支持可以使用自定义模板变量`。 +- INPUT SQL:输入要查询的语句(该 SQL 语句的结果集应为两列多行),例如:`select _wstart, avg(mem_system) from log.dnodes_info where ts >= $from and ts < $to interval($interval)` ,其中,from、to 和 interval 为 TDengine 插件的内置变量,表示从 Grafana 插件面板获取的查询范围和时间间隔。除了内置变量外,`也支持可以使用自定义模板变量`。 - ALIAS BY:可设置当前查询别名。 - GENERATE SQL: 点击该按钮会自动替换相应变量,并生成最终执行的语句。 @@ -205,7 +206,11 @@ docker run -d \ ### 导入 Dashboard -在数据源配置页面,您可以为该数据源导入 TDinsight 面板,作为 TDengine 集群的监控可视化工具。该 Dashboard 已发布在 Grafana:[Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) 。其他安装方式和相关使用说明请见 [TDinsight 用户手册](/reference/tdinsight/)。 +在数据源配置页面,您可以为该数据源导入 TDinsight 面板,作为 TDengine 集群的监控可视化工具。如果 TDengine 服务端为 3.0 版本请选择 `TDinsight for 3.x` 导入。 + +![TDengine Database Grafana plugine import dashboard](./import_dashboard.webp) + +其中适配 TDengine 2.* 的 Dashboard 已发布在 Grafana:[Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) 。其他安装方式和相关使用说明请见 [TDinsight 用户手册](/reference/tdinsight/)。 使用 TDengine 作为数据源的其他面板,可以[在此搜索](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource)。以下是一份不完全列表: diff --git a/docs/zh/20-third-party/02-prometheus.md b/docs/zh/20-third-party/02-prometheus.md index 0fe534b8df263064e5269e1732b69893efd7a79a..eb6c3bf1d0b5f6e5d8146566969df41dbad5bf99 100644 --- a/docs/zh/20-third-party/02-prometheus.md +++ b/docs/zh/20-third-party/02-prometheus.md @@ -1,6 +1,7 @@ --- sidebar_label: Prometheus title: Prometheus +description: 使用 Prometheus 访问 TDengine --- import Prometheus from "../14-reference/_prometheus.mdx" diff --git a/docs/zh/20-third-party/03-telegraf.md b/docs/zh/20-third-party/03-telegraf.md index 88a69211c0592940d7f75d34c03bcc0593cd74d6..84883e665a84db89d564314a0e47f9caab04d6ff 100644 --- a/docs/zh/20-third-party/03-telegraf.md +++ b/docs/zh/20-third-party/03-telegraf.md @@ -1,6 +1,7 @@ --- sidebar_label: Telegraf title: Telegraf 写入 +description: 使用 Telegraf 向 TDengine 写入数据 --- import Telegraf from "../14-reference/_telegraf.mdx" diff --git a/docs/zh/20-third-party/05-collectd.md b/docs/zh/20-third-party/05-collectd.md index 04892fd42e92e962fcccadf626f67c432e78d286..cc2235f2600ec44425a2f22f39dc3c58a4ccdd5a 100644 --- a/docs/zh/20-third-party/05-collectd.md +++ b/docs/zh/20-third-party/05-collectd.md @@ -1,6 +1,7 @@ --- sidebar_label: collectd title: collectd 写入 +description: 使用 collected 向 TDengine 写入数据 --- import CollectD from "../14-reference/_collectd.mdx" diff --git a/docs/zh/20-third-party/06-statsd.md b/docs/zh/20-third-party/06-statsd.md index 260d01183598826e1c887164d0b1b146c5e80c95..122c9fd94c57ef4979d432e2a45cc5136b1644b2 100644 --- a/docs/zh/20-third-party/06-statsd.md +++ b/docs/zh/20-third-party/06-statsd.md @@ -1,6 +1,7 @@ --- sidebar_label: StatsD title: StatsD 直接写入 +description: 使用 StatsD 向 TDengine 写入 --- import StatsD from "../14-reference/_statsd.mdx" diff --git a/docs/zh/20-third-party/07-icinga2.md b/docs/zh/20-third-party/07-icinga2.md index ed1f1404a730eca5f51e2ff9bbcd54949018f8ea..06ead57655cfad7bcf88945780dbed52e9c58e16 100644 --- a/docs/zh/20-third-party/07-icinga2.md +++ b/docs/zh/20-third-party/07-icinga2.md @@ -1,6 +1,7 @@ --- sidebar_label: icinga2 title: icinga2 写入 +description: 使用 icinga2 写入 TDengine --- import Icinga2 from "../14-reference/_icinga2.mdx" diff --git a/docs/zh/20-third-party/08-tcollector.md b/docs/zh/20-third-party/08-tcollector.md index a1245e8c27f302d56f88fa382b5f38f9bd49a0aa..78d0b4a5dfda0c1a18908f5a0f5f9314e82e3737 100644 --- a/docs/zh/20-third-party/08-tcollector.md +++ b/docs/zh/20-third-party/08-tcollector.md @@ -1,6 +1,7 @@ --- sidebar_label: TCollector title: TCollector 写入 +description: 使用 TCollector 写入 TDengine --- import TCollector from "../14-reference/_tcollector.mdx" diff --git a/docs/zh/20-third-party/09-emq-broker.md b/docs/zh/20-third-party/09-emq-broker.md index dd98374558080a0ea11cbc22ede58b66a3984191..782a139e223456d0f3484d282d641075be1a3f81 100644 --- a/docs/zh/20-third-party/09-emq-broker.md +++ b/docs/zh/20-third-party/09-emq-broker.md @@ -1,6 +1,7 @@ --- sidebar_label: EMQX Broker title: EMQX Broker 写入 +description: 使用 EMQX Broker 写入 TDengine --- MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/emqx)是一开源的 MQTT Broker 软件,无需任何代码,只需要在 EMQX Dashboard 里使用“规则”做简单配置,即可将 MQTT 的数据直接写入 TDengine。EMQX 支持通过 发送到 Web 服务的方式保存数据到 TDengine,也在企业版上提供原生的 TDengine 驱动实现直接保存。 @@ -90,7 +91,7 @@ http://127.0.0.1:6041/rest/sql ``` Basic cm9vdDp0YW9zZGF0YQ== ``` -相关文档请参考[ TDengine REST API 文档](/reference/rest-api/)。 +相关文档请参考[ TDengine REST API 文档](../../connector/rest-api/)。 在消息体中输入规则引擎替换模板: diff --git a/docs/zh/20-third-party/10-hive-mq-broker.md b/docs/zh/20-third-party/10-hive-mq-broker.md index f75ed793d6272ae27f92676e2096ef455f638aa6..a388ff6daff41aa6f74af646f6121a360da56f36 100644 --- a/docs/zh/20-third-party/10-hive-mq-broker.md +++ b/docs/zh/20-third-party/10-hive-mq-broker.md @@ -1,6 +1,7 @@ --- sidebar_label: HiveMQ Broker title: HiveMQ Broker 写入 +description: 使用 HivMQ Broker 写入 TDengine --- [HiveMQ](https://www.hivemq.com/) 是一个提供免费个人版和企业版的 MQTT 代理,主要用于企业和新兴的机器到机器 M2M 通讯和内部传输,满足可伸缩性、易管理和安全特性。HiveMQ 提供了开源的插件开发包。可以通过 HiveMQ extension - TDengine 保存数据到 TDengine。详细使用方法请参考 [HiveMQ extension - TDengine 说明文档](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md)。 diff --git a/docs/zh/20-third-party/11-kafka.md b/docs/zh/20-third-party/11-kafka.md index 8369806adcfe1b195348e7d60160609cde9150e8..1172f4fbc5bcd9f240bd5e2a47108a8791810e76 100644 --- a/docs/zh/20-third-party/11-kafka.md +++ b/docs/zh/20-third-party/11-kafka.md @@ -1,6 +1,7 @@ --- sidebar_label: Kafka -title: TDengine Kafka Connector 使用教程 +title: TDengine Kafka Connector +description: 使用 TDengine Kafka Connector 的详细指南 --- TDengine Kafka Connector 包含两个插件: TDengine Source Connector 和 TDengine Sink Connector。用户只需提供简单的配置文件,就可以将 Kafka 中指定 topic 的数据(批量或实时)同步到 TDengine, 或将 TDengine 中指定数据库的数据(批量或实时)同步到 Kafka。 @@ -184,7 +185,7 @@ echo `cat /tmp/confluent.current`/connect/connect.stdout TDengine Sink Connector 的作用是同步指定 topic 的数据到 TDengine。用户无需提前创建数据库和超级表。可手动指定目标数据库的名字(见配置参数 connection.database), 也可按一定规则生成(见配置参数 connection.database.prefix)。 -TDengine Sink Connector 内部使用 TDengine [无模式写入接口](/reference/connector/cpp#无模式写入-api)写数据到 TDengine,目前支持三种格式的数据:[InfluxDB 行协议格式](/develop/insert-data/influxdb-line)、 [OpenTSDB Telnet 协议格式](/develop/insert-data/opentsdb-telnet) 和 [OpenTSDB JSON 协议格式](/develop/insert-data/opentsdb-json)。 +TDengine Sink Connector 内部使用 TDengine [无模式写入接口](../../connector/cpp#无模式写入-api)写数据到 TDengine,目前支持三种格式的数据:[InfluxDB 行协议格式](/develop/insert-data/influxdb-line)、 [OpenTSDB Telnet 协议格式](/develop/insert-data/opentsdb-telnet) 和 [OpenTSDB JSON 协议格式](/develop/insert-data/opentsdb-json)。 下面的示例将主题 meters 的数据,同步到目标数据库 power。数据格式为 InfluxDB Line 协议格式。 diff --git a/docs/zh/20-third-party/12-google-data-studio.md b/docs/zh/20-third-party/12-google-data-studio.md new file mode 100644 index 0000000000000000000000000000000000000000..bc06f0ea3261bcd93247e0c7b8e1d6c3628f3121 --- /dev/null +++ b/docs/zh/20-third-party/12-google-data-studio.md @@ -0,0 +1,39 @@ +--- +sidebar_label: Google Data Studio +title: TDengine Google Data Studio Connector +description: 使用 Google Data Studio 存取 TDengine 数据的详细指南 +--- + +Google Data Studio 是一个强大的报表可视化工具,它提供了丰富的数据图表和数据连接,可以非常方便地按照既定模板生成报表。因其简便易用和生态丰富而在数据分析领域得到一众数据科学家的青睐。 + +Data Studio 可以支持多种数据来源,除了诸如 Google Analytics、Google AdWords、Search Console、BigQuery 等 Google 自己的服务之外,用户也可以直接将离线文件上传至 Google Cloud Storage,或是通过连接器来接入其它数据源。 + +![01](gds/gds-01.webp) + +目前 TDengine 连接器已经发布到 Google Data Studio 应用商店,你可以在 “Connect to Data” 页面下直接搜索 TDengine,将其选作数据源。 + +![02](gds/gds-02.png.webp) + +接下来选择 AUTHORIZE 按钮。 + +![03](gds/gds-03.png.webp) + +设置允许连接自己的账号到外部服务。 + +![04](gds/gds-04.png.webp) + +在接下来的页面选择运行 TDengine REST 服务的 URL,并输入用户名、密码、数据库名称、表名称以及查询时间范围,并点击右上角的 CONNECT 按钮。 + +![05](gds/gds-05.png.webp) + +连接成功后,就可以使用 GDS 方便地进行数据处理并创建报表了。 + +![06](gds/gds-06.png.webp) + +目前的维度和指标规则是:timestamp 类型的字段和 tag 字段会被连接器定义为维度,而其他类型的字段是指标。用户还可以根据自己的需求创建不同的表。 + +![07](gds/gds-07.png.webp) +![08](gds/gds-08.png.webp) +![09](gds/gds-09.png.webp) +![10](gds/gds-10.png.webp) +![11](gds/gds-11.png.webp) diff --git a/docs/zh/20-third-party/gds/gds-01.webp b/docs/zh/20-third-party/gds/gds-01.webp new file mode 100644 index 0000000000000000000000000000000000000000..2e5f9e4ff5db1e37718e2397c9a13a9f0e05602d Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-01.webp differ diff --git a/docs/zh/20-third-party/gds/gds-02.png.webp b/docs/zh/20-third-party/gds/gds-02.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..3b3537f5a488019482f94452e70bd1bd79867ab5 Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-02.png.webp differ diff --git a/docs/zh/20-third-party/gds/gds-03.png.webp b/docs/zh/20-third-party/gds/gds-03.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..5719436d5b2f21aa861067b966511e4b34d17dce Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-03.png.webp differ diff --git a/docs/zh/20-third-party/gds/gds-04.png.webp b/docs/zh/20-third-party/gds/gds-04.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..ddaae5c1a63b6b4db692e12491df55b88dcaadee Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-04.png.webp differ diff --git a/docs/zh/20-third-party/gds/gds-05.png.webp b/docs/zh/20-third-party/gds/gds-05.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..9a917678fc7e60f0a739fa1e2b0f4fa010d12708 Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-05.png.webp differ diff --git a/docs/zh/20-third-party/gds/gds-06.png.webp b/docs/zh/20-third-party/gds/gds-06.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..c76b68d32b5907bd5ba4e4010456f2ca5303448f Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-06.png.webp differ diff --git a/docs/zh/20-third-party/gds/gds-07.png.webp b/docs/zh/20-third-party/gds/gds-07.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..1386ae9c4db4f2465dd071afc5a047658b47031c Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-07.png.webp differ diff --git a/docs/zh/20-third-party/gds/gds-08.png.webp b/docs/zh/20-third-party/gds/gds-08.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..59dcf8b31df8bde8d4073ee0c7b1c7bdd7bd439d Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-08.png.webp differ diff --git a/docs/zh/20-third-party/gds/gds-09.png.webp b/docs/zh/20-third-party/gds/gds-09.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..b94439f211a814f66d41231c9386c57f3ffe8322 Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-09.png.webp differ diff --git a/docs/zh/20-third-party/gds/gds-10.png.webp b/docs/zh/20-third-party/gds/gds-10.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..a63cad9e9a3d412b1132359506530498fb1a0e57 Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-10.png.webp differ diff --git a/docs/zh/20-third-party/gds/gds-11.png.webp b/docs/zh/20-third-party/gds/gds-11.png.webp new file mode 100644 index 0000000000000000000000000000000000000000..fc38cd9a29c00afa48238741c33b439f737a7b8f Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-11.png.webp differ diff --git a/docs/zh/20-third-party/import_dashboard.webp b/docs/zh/20-third-party/import_dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..164e3f4690a5a55f937a3c29e1e8ca026648e6b1 Binary files /dev/null and b/docs/zh/20-third-party/import_dashboard.webp differ diff --git a/docs/zh/21-tdinternal/01-arch.md b/docs/zh/21-tdinternal/01-arch.md index a910c584d6ba47844d51e45e5010581075a72fb6..704524fd210152af34e15d248d3d4dbe050e4fef 100644 --- a/docs/zh/21-tdinternal/01-arch.md +++ b/docs/zh/21-tdinternal/01-arch.md @@ -1,6 +1,7 @@ --- sidebar_label: 整体架构 title: 整体架构 +description: TDengine 架构设计,包括:集群、存储、缓存与持久化、数据备份、多级存储等 --- ## 集群与基本逻辑单元 @@ -287,7 +288,7 @@ TDengine 对每个数据采集点单独建表,但在实际应用中经常需 7. vnode 返回本节点的查询计算结果; 8. qnode 完成多节点数据聚合后将最终查询结果返回给客户端; -由于 TDengine 在 vnode 内将标签数据与时序数据分离存储,通过在内存里过滤标签数据,先找到需要参与聚合操作的表的集合,将需要扫描的数据集大幅减少,大幅提升聚合计算速度。同时,由于数据分布在多个 vnode/dnode,聚合计算操作在多个 vnode 里并发进行,又进一步提升了聚合的速度。 对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样,细节请看 TAOS SQL。 +由于 TDengine 在 vnode 内将标签数据与时序数据分离存储,通过在内存里过滤标签数据,先找到需要参与聚合操作的表的集合,将需要扫描的数据集大幅减少,大幅提升聚合计算速度。同时,由于数据分布在多个 vnode/dnode,聚合计算操作在多个 vnode 里并发进行,又进一步提升了聚合的速度。 对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样,细节请看 TDengine SQL。 ### 预计算 diff --git a/docs/zh/21-tdinternal/03-high-availability.md b/docs/zh/21-tdinternal/03-high-availability.md index ba056b6f162df90fcb271fe536a2b24d0745f75a..4cdf04f6d14d73a819f90bc2317a713c90fa9b91 100644 --- a/docs/zh/21-tdinternal/03-high-availability.md +++ b/docs/zh/21-tdinternal/03-high-availability.md @@ -1,5 +1,6 @@ --- title: 高可用 +description: TDengine 的高可用设计 --- ## Vnode 的高可用性 diff --git a/docs/zh/21-tdinternal/05-load-balance.md b/docs/zh/21-tdinternal/05-load-balance.md index 2376dd3e612a00006eaf2fc7b1782da3901908bc..07af2328d52573343fb28c045b25785f6822191f 100644 --- a/docs/zh/21-tdinternal/05-load-balance.md +++ b/docs/zh/21-tdinternal/05-load-balance.md @@ -1,5 +1,6 @@ --- title: 负载均衡 +description: TDengine 的负载均衡设计 --- TDengine 中的负载均衡主要指对时序数据的处理的负载均衡。TDengine 采用 Hash 一致性算法将一个数据库中的所有表和子表的数据均衡分散在属于该数据库的所有 vgroup 中,每张表或子表只能由一个 vgroup 处理,一个 vgroup 可能负责处理多个表或子表。 @@ -7,7 +8,7 @@ TDengine 中的负载均衡主要指对时序数据的处理的负载均衡。TD 创建数据库时可以指定其中的 vgroup 的数量: ```sql -create database db0 vgroups 100; +create database db0 vgroups 20; ``` 如何指定合适的 vgroup 的数量,这取决于系统资源。假定系统中只计划建立一个数据库,则 vgroup 数量由集群中所有 dnode 所能使用的资源决定。原则上可用的 CPU 和 Memory 越多,可建立的 vgroup 也越多。但也要考虑到磁盘性能,过多的 vgroup 在磁盘性能达到上限后反而会拖累整个系统的性能。假如系统中会建立多个数据库,则多个数据库的 vgroup 之和取决于系统中可用资源的数量。要综合考虑多个数据库之间表的数量、写入频率、数据量等多个因素在多个数据库之间分配 vgroup。实际中建议首先根据系统资源配置选择一个初始的 vgroup 数量,比如 CPU 总核数的 2 倍,以此为起点通过测试找到最佳的 vgroup 数量配置,此为系统中的 vgroup 总数。如果有多个数据库的话,再根据各个数据库的表数和数据量对 vgroup 进行分配。 diff --git a/docs/zh/21-tdinternal/index.md b/docs/zh/21-tdinternal/index.md index 63a746623e0dd955f61ba887a76f8ecf7eb16972..21f106edc999972f9e1cc4b04bc8308878cee56a 100644 --- a/docs/zh/21-tdinternal/index.md +++ b/docs/zh/21-tdinternal/index.md @@ -1,5 +1,6 @@ --- title: 技术内幕 +description: TDengine 的内部设计 --- ```mdx-code-block diff --git a/docs/zh/25-application/01-telegraf.md b/docs/zh/25-application/01-telegraf.md index 95df8699ef85b02d6e9dba398c787644fc9089b2..4e9597f96454730ebcdee5adeebf55439923e8e7 100644 --- a/docs/zh/25-application/01-telegraf.md +++ b/docs/zh/25-application/01-telegraf.md @@ -1,6 +1,7 @@ --- sidebar_label: TDengine + Telegraf + Grafana -title: 使用 TDengine + Telegraf + Grafana 快速搭建 IT 运维展示系统 +title: TDengine + Telegraf + Grafana +description: 使用 TDengine + Telegraf + Grafana 快速搭建 IT 运维展示系统 --- ## 背景介绍 @@ -34,7 +35,7 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 ### TDengine -从涛思数据官网[下载](http://taosdata.com/cn/all-downloads/)页面下载最新 TDengine-server 2.4.0.x 或以上版本安装。 +从涛思数据官网[下载](http://taosdata.com/cn/all-downloads/)页面下载最新 TDengine-server 版本安装。 ## 数据链路设置 @@ -79,4 +80,4 @@ sudo systemctl start telegraf ## 总结 -以上演示如何快速搭建一个完整的 IT 运维展示系统。得力于 TDengine 2.4.0.0 版本中新增的 schemaless 协议解析功能,以及强大的生态软件适配能力,用户可以短短数分钟就可以搭建一个高效易用的 IT 运维系统。TDengine 强大的数据写入查询性能和其他丰富功能请参考官方文档和产品落地案例。 +以上演示如何快速搭建一个完整的 IT 运维展示系统。得力于 TDengine 的 schemaless 协议解析功能,以及强大的生态软件适配能力,用户可以短短数分钟就可以搭建一个高效易用的 IT 运维系统。TDengine 强大的数据写入查询性能和其他丰富功能请参考官方文档和产品落地案例。 diff --git a/docs/zh/25-application/02-collectd.md b/docs/zh/25-application/02-collectd.md index 78c61bb969092d7040ddcb3d02ce7bd29a784858..c6230f48abb545e3064f406d9005a4a3ba8ea5ba 100644 --- a/docs/zh/25-application/02-collectd.md +++ b/docs/zh/25-application/02-collectd.md @@ -1,6 +1,7 @@ --- sidebar_label: TDengine + collectd/StatsD + Grafana -title: 使用 TDengine + collectd/StatsD + Grafana 快速搭建 IT 运维监控系统 +title: TDengine + collectd/StatsD + Grafana +description: 使用 TDengine + collectd/StatsD + Grafana 快速搭建 IT 运维监控系统 --- ## 背景介绍 @@ -36,7 +37,7 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 ### 安装 TDengine -从涛思数据官网[下载](http://taosdata.com/cn/all-downloads/)页面下载最新 TDengine-server 2.4.0.x 或以上版本安装。 +从涛思数据官网[下载](http://taosdata.com/cn/all-downloads/)页面下载最新 TDengine-server 版本安装。 ## 数据链路设置 @@ -90,6 +91,6 @@ repeater 部分添加 { host:'', port: diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md new file mode 100644 index 0000000000000000000000000000000000000000..e3e146313115fee12e539a161792234c2df671a5 --- /dev/null +++ b/docs/zh/28-releases/01-tdengine.md @@ -0,0 +1,16 @@ +--- +sidebar_label: TDengine 发布历史 +title: TDengine 发布历史 +description: TDengine 发布历史、Release Notes 及下载链接 +--- + +import Release from "/components/ReleaseV3"; + +## 3.0.0.1 + + + + + diff --git a/docs/zh/28-releases/02-tools.md b/docs/zh/28-releases/02-tools.md new file mode 100644 index 0000000000000000000000000000000000000000..61129d74e57504286660a178f757cb816b75dbb5 --- /dev/null +++ b/docs/zh/28-releases/02-tools.md @@ -0,0 +1,11 @@ +--- +sidebar_label: taosTools 发布历史 +title: taosTools 发布历史 +description: taosTools 的发布历史、Release Notes 和下载链接 +--- + +import Release from "/components/ReleaseV3"; + +## 2.1.2 + + \ No newline at end of file diff --git a/docs/zh/28-releases/_category_.yml b/docs/zh/28-releases/_category_.yml new file mode 100644 index 0000000000000000000000000000000000000000..dcd57247d7629e0bd46a22394c79182fccb22ede --- /dev/null +++ b/docs/zh/28-releases/_category_.yml @@ -0,0 +1 @@ +label: 发布历史 \ No newline at end of file diff --git a/examples/JDBC/JDBCDemo/README-jdbc-windows.md b/examples/JDBC/JDBCDemo/README-jdbc-windows.md index 17c5c8df00ab8727d1adfe493d3fbbd32891a676..5a781f40f730218286edb9f6a7f184ee79e7a5fc 100644 --- a/examples/JDBC/JDBCDemo/README-jdbc-windows.md +++ b/examples/JDBC/JDBCDemo/README-jdbc-windows.md @@ -129,7 +129,7 @@ https://www.taosdata.com/cn/all-downloads/ 192.168.236.136 td01 ``` -配置完成后,在命令行内使用taos shell连接server端 +配置完成后,在命令行内使用TDengine CLI连接server端 ```shell C:\TDengine>taos -h td01 diff --git a/examples/JDBC/JDBCDemo/pom.xml b/examples/JDBC/JDBCDemo/pom.xml index 8cf0356721f8ffd568e87fa4a77c86eb0f90a62b..807ceb0f24644d3978274faee1bc8b47c9d7af47 100644 --- a/examples/JDBC/JDBCDemo/pom.xml +++ b/examples/JDBC/JDBCDemo/pom.xml @@ -17,7 +17,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.34 + 3.0.0 diff --git a/examples/JDBC/SpringJdbcTemplate/pom.xml b/examples/JDBC/SpringJdbcTemplate/pom.xml index eac3dec0a92a4c8aa519cd426b9c8d3895047be6..6e4941b4f1c5bb5557109d06496bff02744a3092 100644 --- a/examples/JDBC/SpringJdbcTemplate/pom.xml +++ b/examples/JDBC/SpringJdbcTemplate/pom.xml @@ -47,7 +47,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 3.0.0 diff --git a/examples/JDBC/SpringJdbcTemplate/readme.md b/examples/JDBC/SpringJdbcTemplate/readme.md index b70a6565f88d0a08b8a26a60676e729ecdb39e2e..f59bcdbeb547b0c0576b43abe4e1f2cef2175913 100644 --- a/examples/JDBC/SpringJdbcTemplate/readme.md +++ b/examples/JDBC/SpringJdbcTemplate/readme.md @@ -10,7 +10,7 @@ ```xml - + @@ -28,5 +28,5 @@ mvn clean package ``` 打包成功之后,进入 `target/` 目录下,执行以下命令就可运行测试: ```shell -java -jar SpringJdbcTemplate-1.0-SNAPSHOT-jar-with-dependencies.jar +java -jar target/SpringJdbcTemplate-1.0-SNAPSHOT-jar-with-dependencies.jar ``` \ No newline at end of file diff --git a/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/example/jdbcTemplate/App.java b/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/example/jdbcTemplate/App.java index 6942d62a83adafb85496a81ce93866cd0d53611d..ce26b7504ae41644032c1f59579efc310f58d527 100644 --- a/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/example/jdbcTemplate/App.java +++ b/examples/JDBC/SpringJdbcTemplate/src/main/java/com/taosdata/example/jdbcTemplate/App.java @@ -28,7 +28,7 @@ public class App { //use database executor.doExecute("use test"); // create table - executor.doExecute("create table if not exists test.weather (ts timestamp, temperature int, humidity float)"); + executor.doExecute("create table if not exists test.weather (ts timestamp, temperature float, humidity int)"); WeatherDao weatherDao = ctx.getBean(WeatherDao.class); Weather weather = new Weather(new Timestamp(new Date().getTime()), random.nextFloat() * 50.0f, random.nextInt(100)); diff --git a/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/example/jdbcTemplate/BatcherInsertTest.java b/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/example/jdbcTemplate/BatcherInsertTest.java index 29d0f79fd4982d43078e590b4320c0df457ee44c..782fcbe0eb2020c8bcbafecb0b2d61185b139477 100644 --- a/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/example/jdbcTemplate/BatcherInsertTest.java +++ b/examples/JDBC/SpringJdbcTemplate/src/test/java/com/taosdata/example/jdbcTemplate/BatcherInsertTest.java @@ -41,7 +41,7 @@ public class BatcherInsertTest { //use database executor.doExecute("use test"); // create table - executor.doExecute("create table if not exists test.weather (ts timestamp, temperature int, humidity float)"); + executor.doExecute("create table if not exists test.weather (ts timestamp, temperature float, humidity int)"); } @Test diff --git a/examples/JDBC/connectionPools/README-cn.md b/examples/JDBC/connectionPools/README-cn.md index 9b26df3c2eb2c23171a673643891a292af4c920c..6e589418b11a3d6c8c64d24b28a0ea7c65ad0830 100644 --- a/examples/JDBC/connectionPools/README-cn.md +++ b/examples/JDBC/connectionPools/README-cn.md @@ -13,13 +13,13 @@ ConnectionPoolDemo的程序逻辑: ### 如何运行这个例子: ```shell script -mvn clean package assembly:single -java -jar target/connectionPools-1.0-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1 +mvn clean package +java -jar target/ConnectionPoolDemo-jar-with-dependencies.jar -host 127.0.0.1 ``` 使用mvn运行ConnectionPoolDemo的main方法,可以指定参数 ```shell script Usage: -java -jar target/connectionPools-1.0-SNAPSHOT-jar-with-dependencies.jar +java -jar target/ConnectionPoolDemo-jar-with-dependencies.jar -host : hostname -poolType -poolSize diff --git a/examples/JDBC/connectionPools/pom.xml b/examples/JDBC/connectionPools/pom.xml index 34518900ed30f48effd47a8786233080f3e5291f..61717cf1121696a97d867b5d43af75231ddd0472 100644 --- a/examples/JDBC/connectionPools/pom.xml +++ b/examples/JDBC/connectionPools/pom.xml @@ -18,7 +18,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 3.0.0 @@ -53,7 +53,7 @@ org.apache.logging.log4j log4j-core - 2.14.1 + 2.17.1 diff --git a/examples/JDBC/mybatisplus-demo/pom.xml b/examples/JDBC/mybatisplus-demo/pom.xml index ad6a63e800fb73dd3c768a8aca941f70cec235b3..5555145958de67fdf03eb744426afcfc13b6fcb3 100644 --- a/examples/JDBC/mybatisplus-demo/pom.xml +++ b/examples/JDBC/mybatisplus-demo/pom.xml @@ -47,7 +47,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 3.0.0 diff --git a/examples/JDBC/mybatisplus-demo/readme b/examples/JDBC/mybatisplus-demo/readme new file mode 100644 index 0000000000000000000000000000000000000000..b31b6c34bf1c2bd661d88fff066eb4632d456a1c --- /dev/null +++ b/examples/JDBC/mybatisplus-demo/readme @@ -0,0 +1,14 @@ +# 使用说明 + +## 创建使用db +```shell +$ taos + +> create database mp_test +``` + +## 执行测试用例 + +```shell +$ mvn clean test +``` \ No newline at end of file diff --git a/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java b/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java index 6733cbded9d1d180408eccaad9e8badad7d39a3d..1f0338db34019661a2d7c4a0716d953195d059a2 100644 --- a/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java +++ b/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java @@ -2,7 +2,17 @@ package com.taosdata.example.mybatisplusdemo.mapper; import com.baomidou.mybatisplus.core.mapper.BaseMapper; import com.taosdata.example.mybatisplusdemo.domain.Weather; +import org.apache.ibatis.annotations.Insert; +import org.apache.ibatis.annotations.Update; public interface WeatherMapper extends BaseMapper { + @Update("CREATE TABLE if not exists weather(ts timestamp, temperature float, humidity int, location nchar(100))") + int createTable(); + + @Insert("insert into weather (ts, temperature, humidity, location) values(#{ts}, #{temperature}, #{humidity}, #{location})") + int insertOne(Weather one); + + @Update("drop table if exists weather") + void dropTable(); } diff --git a/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml b/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml index 38180c6d75a620a63bcaab9ec350d97e65f9dd16..985ed1675ee408bad346dff2a1b7e03c5138f4df 100644 --- a/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml +++ b/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml @@ -2,7 +2,7 @@ spring: datasource: driver-class-name: com.taosdata.jdbc.TSDBDriver url: jdbc:TAOS://localhost:6030/mp_test?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8 - user: root + username: root password: taosdata druid: diff --git a/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java b/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java index 4331d15d3476d3428e72a186664ed77cc59aad3e..4d9dbf8d2fb909ef46dbe23a2bb5192d4971195e 100644 --- a/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java +++ b/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java @@ -82,27 +82,15 @@ public class TemperatureMapperTest { Assert.assertEquals(1, affectRows); } - /*** - * test SelectOne - * **/ - @Test - public void testSelectOne() { - QueryWrapper wrapper = new QueryWrapper<>(); - wrapper.eq("location", "beijing"); - Temperature one = mapper.selectOne(wrapper); - System.out.println(one); - Assert.assertNotNull(one); - } - /*** * test select By map * ***/ @Test public void testSelectByMap() { Map map = new HashMap<>(); - map.put("location", "beijing"); + map.put("location", "北京"); List temperatures = mapper.selectByMap(map); - Assert.assertEquals(1, temperatures.size()); + Assert.assertTrue(temperatures.size() > 1); } /*** @@ -120,7 +108,7 @@ public class TemperatureMapperTest { @Test public void testSelectCount() { int count = mapper.selectCount(null); - Assert.assertEquals(5, count); + Assert.assertEquals(10, count); } /**** diff --git a/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java b/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java index 1699344552f89e1595d1317019c992dcd3820e77..dba8abd1ed006e81cf8240e66cfcc0b525af9b79 100644 --- a/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java +++ b/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java @@ -6,6 +6,7 @@ import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.taosdata.example.mybatisplusdemo.domain.Weather; import org.junit.Assert; import org.junit.Test; +import org.junit.Before; import org.junit.runner.RunWith; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.context.SpringBootTest; @@ -26,6 +27,18 @@ public class WeatherMapperTest { @Autowired private WeatherMapper mapper; + @Before + public void createTable(){ + mapper.dropTable(); + mapper.createTable(); + Weather one = new Weather(); + one.setTs(new Timestamp(1605024000000l)); + one.setTemperature(12.22f); + one.setLocation("望京"); + one.setHumidity(100); + mapper.insertOne(one); + } + @Test public void testSelectList() { List weathers = mapper.selectList(null); @@ -46,20 +59,20 @@ public class WeatherMapperTest { @Test public void testSelectOne() { QueryWrapper wrapper = new QueryWrapper<>(); - wrapper.eq("location", "beijing"); + wrapper.eq("location", "望京"); Weather one = mapper.selectOne(wrapper); System.out.println(one); Assert.assertEquals(12.22f, one.getTemperature(), 0.00f); - Assert.assertEquals("beijing", one.getLocation()); + Assert.assertEquals("望京", one.getLocation()); } - @Test - public void testSelectByMap() { - Map map = new HashMap<>(); - map.put("location", "beijing"); - List weathers = mapper.selectByMap(map); - Assert.assertEquals(1, weathers.size()); - } + // @Test + // public void testSelectByMap() { + // Map map = new HashMap<>(); + // map.put("location", "beijing"); + // List weathers = mapper.selectByMap(map); + // Assert.assertEquals(1, weathers.size()); + // } @Test public void testSelectObjs() { diff --git a/examples/JDBC/readme.md b/examples/JDBC/readme.md index 9a017f4feab148cb7c3fd4132360c3075c6573cb..c7d7875308d248c1abef8d47bc69a69e91374dbb 100644 --- a/examples/JDBC/readme.md +++ b/examples/JDBC/readme.md @@ -10,4 +10,4 @@ | 6 | taosdemo | This is an internal tool for testing Our JDBC-JNI, JDBC-RESTful, RESTful interfaces | -more detail: https://www.taosdata.com/cn//documentation20/connector-java/ \ No newline at end of file +more detail: https://docs.taosdata.com/reference/connector/java/ \ No newline at end of file diff --git a/examples/JDBC/springbootdemo/pom.xml b/examples/JDBC/springbootdemo/pom.xml index 9126813b67e71691692109920f891a6fb4cc5ab5..ee15f6013e4fd35bf30fb5af00b226e7c4d3d8c7 100644 --- a/examples/JDBC/springbootdemo/pom.xml +++ b/examples/JDBC/springbootdemo/pom.xml @@ -68,7 +68,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.34 + 3.0.0 diff --git a/examples/JDBC/springbootdemo/readme.md b/examples/JDBC/springbootdemo/readme.md index 67a28947d2dfb8fc069bf94fd139a7006d35a22b..a3942a6a512501b7dee1f4f4ff5ccc93da0babbb 100644 --- a/examples/JDBC/springbootdemo/readme.md +++ b/examples/JDBC/springbootdemo/readme.md @@ -1,10 +1,11 @@ ## TDengine SpringBoot + Mybatis Demo +## 需要提前创建 test 数据库 ### 配置 application.properties ```properties # datasource config spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver -spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/log +spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/test spring.datasource.username=root spring.datasource.password=taosdata diff --git a/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java b/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java index ed720fe6c02dd3a7eba6e645ea1e76d704c04d0c..3ee5b597ab08c945f6494d9a8a31da9cd3e01f25 100644 --- a/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java +++ b/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java @@ -6,7 +6,6 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.web.bind.annotation.*; import java.util.List; -import java.util.Map; @RequestMapping("/weather") @RestController diff --git a/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml b/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml index 91938ca24e3cf9c3e0f2895cf40f214d484c55d5..99d5893ec198535d9e8ef1cc6c443625d0a64ec1 100644 --- a/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml +++ b/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml @@ -10,8 +10,7 @@ diff --git a/examples/JDBC/springbootdemo/src/main/resources/application.properties b/examples/JDBC/springbootdemo/src/main/resources/application.properties index 06daa81bbb06450d99ab3f6e640c9795c0ad5d2e..bf21047395ed534e4c7d9db919bb371fab45ec16 100644 --- a/examples/JDBC/springbootdemo/src/main/resources/application.properties +++ b/examples/JDBC/springbootdemo/src/main/resources/application.properties @@ -5,7 +5,7 @@ #spring.datasource.password=taosdata # datasource config - JDBC-RESTful spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver -spring.datasource.url=jdbc:TAOS-RS://localhsot:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8 +spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8 spring.datasource.username=root spring.datasource.password=taosdata spring.datasource.druid.initial-size=5 diff --git a/examples/JDBC/taosdemo/pom.xml b/examples/JDBC/taosdemo/pom.xml index 91b976c2ae6c76a5ae2d7b76c3b90d05e4dae57f..724ecc74077c4080269c695ca50a1cf300e39d0b 100644 --- a/examples/JDBC/taosdemo/pom.xml +++ b/examples/JDBC/taosdemo/pom.xml @@ -10,7 +10,7 @@ Demo project for TDengine - 5.3.2 + 5.3.20 @@ -67,7 +67,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.20 + 3.0.0 @@ -75,20 +75,20 @@ com.alibaba fastjson - 1.2.75 + 1.2.83 mysql mysql-connector-java - 8.0.16 + 8.0.28 test org.apache.logging.log4j log4j-core - 2.14.1 + 2.17.1 diff --git a/examples/JDBC/taosdemo/readme.md b/examples/JDBC/taosdemo/readme.md index 451fa2960adb98e2deb8499732aefde11f4810a1..e5f4eb132b2262990b8fa32fe3c40a617d16d247 100644 --- a/examples/JDBC/taosdemo/readme.md +++ b/examples/JDBC/taosdemo/readme.md @@ -2,9 +2,9 @@ cd tests/examples/JDBC/taosdemo mvn clean package -Dmaven.test.skip=true # 先建表,再插入的 -java -jar target/taosdemo-2.0-jar-with-dependencies.jar -host [hostname] -database [database] -doCreateTable true -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100 +java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host [hostname] -database [database] -doCreateTable true -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100 # 不建表,直接插入的 -java -jar target/taosdemo-2.0-jar-with-dependencies.jar -host [hostname] -database [database] -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100 +java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host [hostname] -database [database] -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100 ``` 需求: diff --git a/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java b/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java index d4f5ff26886b9f90a4235d47bfd004dae9de93f6..6854054703776da46abdbff593724bef179f5b6d 100644 --- a/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java +++ b/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java @@ -32,8 +32,10 @@ public class TaosDemoApplication { System.exit(0); } // 初始化 - final DataSource dataSource = DataSourceFactory.getInstance(config.host, config.port, config.user, config.password); - if (config.executeSql != null && !config.executeSql.isEmpty() && !config.executeSql.replaceAll("\\s", "").isEmpty()) { + final DataSource dataSource = DataSourceFactory.getInstance(config.host, config.port, config.user, + config.password); + if (config.executeSql != null && !config.executeSql.isEmpty() + && !config.executeSql.replaceAll("\\s", "").isEmpty()) { Thread task = new Thread(new SqlExecuteTask(dataSource, config.executeSql)); task.start(); try { @@ -55,7 +57,7 @@ public class TaosDemoApplication { databaseParam.put("keep", Integer.toString(config.keep)); databaseParam.put("days", Integer.toString(config.days)); databaseParam.put("replica", Integer.toString(config.replica)); - //TODO: other database parameters + // TODO: other database parameters databaseService.createDatabase(databaseParam); databaseService.useDatabase(config.database); long end = System.currentTimeMillis(); @@ -70,11 +72,13 @@ public class TaosDemoApplication { if (config.database != null && !config.database.isEmpty()) superTableMeta.setDatabase(config.database); } else if (config.numOfFields == 0) { - String sql = "create table " + config.database + "." + config.superTable + " (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"; + String sql = "create table " + config.database + "." + config.superTable + + " (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"; superTableMeta = SuperTableMetaGenerator.generate(sql); } else { // create super table with specified field size and tag size - superTableMeta = SuperTableMetaGenerator.generate(config.database, config.superTable, config.numOfFields, config.prefixOfFields, config.numOfTags, config.prefixOfTags); + superTableMeta = SuperTableMetaGenerator.generate(config.database, config.superTable, config.numOfFields, + config.prefixOfFields, config.numOfTags, config.prefixOfTags); } /**********************************************************************************/ // 建表 @@ -84,7 +88,8 @@ public class TaosDemoApplication { superTableService.create(superTableMeta); if (!config.autoCreateTable) { // 批量建子表 - subTableService.createSubTable(superTableMeta, config.numOfTables, config.prefixOfTable, config.numOfThreadsForCreate); + subTableService.createSubTable(superTableMeta, config.numOfTables, config.prefixOfTable, + config.numOfThreadsForCreate); } } end = System.currentTimeMillis(); @@ -93,7 +98,7 @@ public class TaosDemoApplication { // 插入 long tableSize = config.numOfTables; int threadSize = config.numOfThreadsForInsert; - long startTime = getProperStartTime(config.startTime, config.keep); + long startTime = getProperStartTime(config.startTime, config.days); if (tableSize < threadSize) threadSize = (int) tableSize; @@ -101,13 +106,13 @@ public class TaosDemoApplication { start = System.currentTimeMillis(); // multi threads to insert - int affectedRows = subTableService.insertMultiThreads(superTableMeta, threadSize, tableSize, startTime, gap, config); + int affectedRows = subTableService.insertMultiThreads(superTableMeta, threadSize, tableSize, startTime, gap, + config); end = System.currentTimeMillis(); logger.info("insert " + affectedRows + " rows, time cost: " + (end - start) + " ms"); /**********************************************************************************/ // 查询 - /**********************************************************************************/ // 删除表 if (config.dropTable) { diff --git a/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java b/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java index efabff6afe904516ad9682cd7197412dc02765ef..ab0a1125d2b879d7e889e4c76cdb021ec46292f7 100644 --- a/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java +++ b/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java @@ -1,7 +1,5 @@ package com.taosdata.taosdemo.service; -import com.taosdata.jdbc.utils.SqlSyntaxValidator; - import javax.sql.DataSource; import java.sql.*; import java.util.ArrayList; @@ -23,10 +21,6 @@ public class QueryService { Boolean[] ret = new Boolean[sqls.length]; for (int i = 0; i < sqls.length; i++) { ret[i] = true; - if (!SqlSyntaxValidator.isValidForExecuteQuery(sqls[i])) { - ret[i] = false; - continue; - } try (Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement()) { stmt.executeQuery(sqls[i]); } catch (SQLException e) { diff --git a/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/SqlSpeller.java b/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/SqlSpeller.java index a60f0641d3a4441195c3a60639fbe3a197115dc3..7651d1e31814981499eb69d669b9176c73f33acd 100644 --- a/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/SqlSpeller.java +++ b/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/SqlSpeller.java @@ -15,9 +15,12 @@ public class SqlSpeller { StringBuilder sb = new StringBuilder(); sb.append("create database if not exists ").append(map.get("database")).append(" "); if (map.containsKey("keep")) - sb.append("keep ").append(map.get("keep")).append(" "); - if (map.containsKey("days")) - sb.append("days ").append(map.get("days")).append(" "); + sb.append("keep "); + if (map.containsKey("days")) { + sb.append(map.get("days")).append("d "); + } else { + sb.append(" "); + } if (map.containsKey("replica")) sb.append("replica ").append(map.get("replica")).append(" "); if (map.containsKey("cache")) @@ -29,7 +32,7 @@ public class SqlSpeller { if (map.containsKey("maxrows")) sb.append("maxrows ").append(map.get("maxrows")).append(" "); if (map.containsKey("precision")) - sb.append("precision ").append(map.get("precision")).append(" "); + sb.append("precision '").append(map.get("precision")).append("' "); if (map.containsKey("comp")) sb.append("comp ").append(map.get("comp")).append(" "); if (map.containsKey("walLevel")) @@ -46,11 +49,13 @@ public class SqlSpeller { // create table if not exists xx.xx using xx.xx tags(x,x,x) public static String createTableUsingSuperTable(SubTableMeta subTableMeta) { StringBuilder sb = new StringBuilder(); - sb.append("create table if not exists ").append(subTableMeta.getDatabase()).append(".").append(subTableMeta.getName()).append(" "); - sb.append("using ").append(subTableMeta.getDatabase()).append(".").append(subTableMeta.getSupertable()).append(" "); -// String tagStr = subTableMeta.getTags().stream().filter(Objects::nonNull) -// .map(tagValue -> tagValue.getName() + " '" + tagValue.getValue() + "' ") -// .collect(Collectors.joining(",", "(", ")")); + sb.append("create table if not exists ").append(subTableMeta.getDatabase()).append(".") + .append(subTableMeta.getName()).append(" "); + sb.append("using ").append(subTableMeta.getDatabase()).append(".").append(subTableMeta.getSupertable()) + .append(" "); + // String tagStr = subTableMeta.getTags().stream().filter(Objects::nonNull) + // .map(tagValue -> tagValue.getName() + " '" + tagValue.getValue() + "' ") + // .collect(Collectors.joining(",", "(", ")")); sb.append("tags ").append(tagValues(subTableMeta.getTags())); return sb.toString(); } @@ -63,7 +68,7 @@ public class SqlSpeller { return sb.toString(); } - //f1, f2, f3 + // f1, f2, f3 private static String fieldValues(List fields) { return IntStream.range(0, fields.size()).mapToObj(i -> { if (i == 0) { @@ -73,13 +78,13 @@ public class SqlSpeller { } }).collect(Collectors.joining(",", "(", ")")); -// return fields.stream() -// .filter(Objects::nonNull) -// .map(fieldValue -> "'" + fieldValue.getValue() + "'") -// .collect(Collectors.joining(",", "(", ")")); + // return fields.stream() + // .filter(Objects::nonNull) + // .map(fieldValue -> "'" + fieldValue.getValue() + "'") + // .collect(Collectors.joining(",", "(", ")")); } - //(f1, f2, f3),(f1, f2, f3) + // (f1, f2, f3),(f1, f2, f3) private static String rowValues(List rowValues) { return rowValues.stream().filter(Objects::nonNull) .map(rowValue -> fieldValues(rowValue.getFields())) @@ -89,8 +94,10 @@ public class SqlSpeller { // insert into xx.xxx using xx.xx tags(x,x,x) values(x,x,x),(x,x,x)... public static String insertOneTableMultiValuesUsingSuperTable(SubTableValue subTableValue) { StringBuilder sb = new StringBuilder(); - sb.append("insert into ").append(subTableValue.getDatabase()).append(".").append(subTableValue.getName()).append(" "); - sb.append("using ").append(subTableValue.getDatabase()).append(".").append(subTableValue.getSupertable()).append(" "); + sb.append("insert into ").append(subTableValue.getDatabase()).append(".").append(subTableValue.getName()) + .append(" "); + sb.append("using ").append(subTableValue.getDatabase()).append(".").append(subTableValue.getSupertable()) + .append(" "); sb.append("tags ").append(tagValues(subTableValue.getTags()) + " "); sb.append("values ").append(rowValues(subTableValue.getValues())); return sb.toString(); @@ -126,7 +133,8 @@ public class SqlSpeller { // create table if not exists xx.xx (f1 xx,f2 xx...) tags(t1 xx, t2 xx...) public static String createSuperTable(SuperTableMeta tableMetadata) { StringBuilder sb = new StringBuilder(); - sb.append("create table if not exists ").append(tableMetadata.getDatabase()).append(".").append(tableMetadata.getName()); + sb.append("create table if not exists ").append(tableMetadata.getDatabase()).append(".") + .append(tableMetadata.getName()); String fields = tableMetadata.getFields().stream() .filter(Objects::nonNull).map(field -> field.getName() + " " + field.getType() + " ") .collect(Collectors.joining(",", "(", ")")); @@ -139,10 +147,10 @@ public class SqlSpeller { return sb.toString(); } - public static String createTable(TableMeta tableMeta) { StringBuilder sb = new StringBuilder(); - sb.append("create table if not exists ").append(tableMeta.getDatabase()).append(".").append(tableMeta.getName()).append(" "); + sb.append("create table if not exists ").append(tableMeta.getDatabase()).append(".").append(tableMeta.getName()) + .append(" "); String fields = tableMeta.getFields().stream() .filter(Objects::nonNull).map(field -> field.getName() + " " + field.getType() + " ") .collect(Collectors.joining(",", "(", ")")); @@ -179,16 +187,17 @@ public class SqlSpeller { public static String insertMultiTableMultiValuesWithColumns(List tables) { StringBuilder sb = new StringBuilder(); sb.append("insert into ").append(tables.stream().filter(Objects::nonNull) - .map(table -> table.getDatabase() + "." + table.getName() + " " + columnNames(table.getColumns()) + " values " + rowValues(table.getValues())) + .map(table -> table.getDatabase() + "." + table.getName() + " " + columnNames(table.getColumns()) + + " values " + rowValues(table.getValues())) .collect(Collectors.joining(" "))); return sb.toString(); } public static String insertMultiTableMultiValues(List tables) { StringBuilder sb = new StringBuilder(); - sb.append("insert into ").append(tables.stream().filter(Objects::nonNull).map(table -> - table.getDatabase() + "." + table.getName() + " values " + rowValues(table.getValues()) - ).collect(Collectors.joining(" "))); + sb.append("insert into ").append(tables.stream().filter(Objects::nonNull) + .map(table -> table.getDatabase() + "." + table.getName() + " values " + rowValues(table.getValues())) + .collect(Collectors.joining(" "))); return sb.toString(); } } diff --git a/examples/JDBC/taosdemo/src/main/resources/application.properties b/examples/JDBC/taosdemo/src/main/resources/application.properties index 488185196f1d2325fd9896d30068cbb202180a3f..4f550f6523587c060bbb2ed889024e1653fb0cb6 100644 --- a/examples/JDBC/taosdemo/src/main/resources/application.properties +++ b/examples/JDBC/taosdemo/src/main/resources/application.properties @@ -1,5 +1,5 @@ -jdbc.driver=com.taosdata.jdbc.rs.RestfulDriver -#jdbc.driver=com.taosdata.jdbc.TSDBDriver +# jdbc.driver=com.taosdata.jdbc.rs.RestfulDriver +jdbc.driver=com.taosdata.jdbc.TSDBDriver hikari.maximum-pool-size=20 hikari.minimum-idle=20 hikari.max-lifetime=0 \ No newline at end of file diff --git a/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java b/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java deleted file mode 100644 index 1f52198d68823326dd81d8c419fc02d89e15ef2d..0000000000000000000000000000000000000000 --- a/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java +++ /dev/null @@ -1,31 +0,0 @@ -package com.taosdata.taosdemo.service; - -import com.taosdata.taosdemo.domain.TableMeta; -import org.junit.Before; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; - -public class TableServiceTest { - private TableService tableService; - - private List tables; - - @Before - public void before() { - tables = new ArrayList<>(); - for (int i = 0; i < 1; i++) { - TableMeta tableMeta = new TableMeta(); - tableMeta.setDatabase("test"); - tableMeta.setName("weather" + (i + 1)); - tables.add(tableMeta); - } - } - - @Test - public void testCreate() { - tableService.create(tables); - } - -} \ No newline at end of file diff --git a/examples/c/CMakeLists.txt b/examples/c/CMakeLists.txt index 9d06dbac6dc3ba9d4dcafe6d8316b52e1b3daeca..4a9007acecaa679dc716c5665eea7f0cd1e34dbb 100644 --- a/examples/c/CMakeLists.txt +++ b/examples/c/CMakeLists.txt @@ -13,15 +13,9 @@ IF (TD_LINUX) #TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua) add_executable(tmq "") - add_executable(tmq_taosx "") add_executable(stream_demo "") add_executable(demoapi "") - target_sources(tmq_taosx - PRIVATE - "tmq_taosx.c" - ) - target_sources(tmq PRIVATE "tmq.c" @@ -41,10 +35,6 @@ IF (TD_LINUX) taos_static ) - target_link_libraries(tmq_taosx - taos_static - ) - target_link_libraries(stream_demo taos_static ) @@ -57,10 +47,6 @@ IF (TD_LINUX) PUBLIC "${TD_SOURCE_DIR}/include/os" PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) - target_include_directories(tmq_taosx - PUBLIC "${TD_SOURCE_DIR}/include/os" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" - ) target_include_directories(stream_demo PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" @@ -73,7 +59,6 @@ IF (TD_LINUX) ) SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq) - SET_TARGET_PROPERTIES(tmq_taosx PROPERTIES OUTPUT_NAME tmq_taosx) SET_TARGET_PROPERTIES(stream_demo PROPERTIES OUTPUT_NAME stream_demo) SET_TARGET_PROPERTIES(demoapi PROPERTIES OUTPUT_NAME demoapi) ENDIF () diff --git a/examples/c/stream_demo.c b/examples/c/stream_demo.c index dd4fbc8d2d400bc9fc257202bda1979a62041895..1c9d11b755f77bf259e45d77c6e5983c3747835a 100644 --- a/examples/c/stream_demo.c +++ b/examples/c/stream_demo.c @@ -13,6 +13,7 @@ * along with this program. If not, see . */ +// clang-format off #include #include #include @@ -94,14 +95,8 @@ int32_t create_stream() { } taos_free_result(pRes); - /*const char* sql = "select min(k), max(k), sum(k) from tu1";*/ - /*const char* sql = "select min(k), max(k), sum(k) as sum_of_k from st1";*/ - /*const char* sql = "select sum(k) from tu1 interval(10m)";*/ - /*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/ - pRes = - taos_query(pConn, - "create stream stream1 trigger max_delay 10s into outstb as select _wstart, sum(k) from st1 partition " - "by tbname session(ts, 10s) "); + pRes = taos_query(pConn, + "create stream stream1 trigger at_once watermark 10s into outstb as select _wstart start, avg(k) from st1 partition by tbname interval(10s)"); if (taos_errno(pRes) != 0) { printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes)); return -1; diff --git a/examples/c/tmq.c b/examples/c/tmq.c index fc34915fe75498d907381a22461f7dae6536b8a4..19adaad116ef65673f5541b5216ce12d2d9151c7 100644 --- a/examples/c/tmq.c +++ b/examples/c/tmq.c @@ -45,10 +45,9 @@ static int32_t msg_process(TAOS_RES* msg) { int32_t numOfFields = taos_field_count(msg); int32_t* length = taos_fetch_lengths(msg); int32_t precision = taos_result_precision(msg); - const char* tbName = tmq_get_table_name(msg); rows++; taos_print_row(buf, row, fields, numOfFields); - printf("row content from %s: %s\n", (tbName != NULL ? tbName : "table null"), buf); + printf("row content: %s\n", buf); } return rows; @@ -167,7 +166,7 @@ int32_t create_topic() { } taos_free_result(pRes); - pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1"); + pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3, tbname from tmqdb.stb where c1 > 1"); if (taos_errno(pRes) != 0) { printf("failed to create topic topicname, reason:%s\n", taos_errstr(pRes)); return -1; @@ -199,9 +198,7 @@ tmq_t* build_consumer() { if (TMQ_CONF_OK != code) return NULL; code = tmq_conf_set(conf, "auto.offset.reset", "earliest"); if (TMQ_CONF_OK != code) return NULL; - code = tmq_conf_set(conf, "experimental.snapshot.enable", "true"); - if (TMQ_CONF_OK != code) return NULL; - code = tmq_conf_set(conf, "msg.with.table.name", "true"); + code = tmq_conf_set(conf, "experimental.snapshot.enable", "false"); if (TMQ_CONF_OK != code) return NULL; tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); @@ -220,14 +217,7 @@ tmq_list_t* build_topic_list() { return topicList; } -void basic_consume_loop(tmq_t* tmq, tmq_list_t* topicList) { - int32_t code; - - if ((code = tmq_subscribe(tmq, topicList))) { - fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code)); - return; - } - +void basic_consume_loop(tmq_t* tmq) { int32_t totalRows = 0; int32_t msgCnt = 0; int32_t timeout = 5000; @@ -237,8 +227,8 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topicList) { msgCnt++; totalRows += msg_process(tmqmsg); taos_free_result(tmqmsg); - /*} else {*/ - /*break;*/ + } else { + break; } } @@ -267,14 +257,12 @@ int main(int argc, char* argv[]) { return -1; } - basic_consume_loop(tmq, topic_list); - - code = tmq_unsubscribe(tmq); - if (code) { - fprintf(stderr, "%% Failed to unsubscribe: %s\n", tmq_err2str(code)); - } else { - fprintf(stderr, "%% unsubscribe\n"); + if ((code = tmq_subscribe(tmq, topic_list))) { + fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code)); } + tmq_list_destroy(topic_list); + + basic_consume_loop(tmq); code = tmq_consumer_close(tmq); if (code) { diff --git a/examples/c/tmq_taosx.c b/examples/c/tmq_taosx.c deleted file mode 100644 index d0def4426905b773db948b0cf6f0d22c8733d5da..0000000000000000000000000000000000000000 --- a/examples/c/tmq_taosx.c +++ /dev/null @@ -1,480 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#include -#include -#include -#include -#include -#include "taos.h" - -static int running = 1; - -static TAOS* use_db(){ - TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); - if (pConn == NULL) { - return NULL; - } - - TAOS_RES* pRes = taos_query(pConn, "use db_taosx"); - if (taos_errno(pRes) != 0) { - printf("error in use db_taosx, reason:%s\n", taos_errstr(pRes)); - return NULL; - } - taos_free_result(pRes); - return pConn; -} - -static void msg_process(TAOS_RES* msg) { - /*memset(buf, 0, 1024);*/ - printf("-----------topic-------------: %s\n", tmq_get_topic_name(msg)); - printf("db: %s\n", tmq_get_db_name(msg)); - printf("vg: %d\n", tmq_get_vgroup_id(msg)); - TAOS *pConn = use_db(); - if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) { - char* result = tmq_get_json_meta(msg); - if (result) { - printf("meta result: %s\n", result); - } - tmq_free_json_meta(result); - } - - tmq_raw_data raw = {0}; - tmq_get_raw(msg, &raw); - int32_t ret = tmq_write_raw(pConn, raw); - printf("write raw data: %s\n", tmq_err2str(ret)); - -// else{ -// while(1){ -// int numOfRows = 0; -// void *pData = NULL; -// taos_fetch_raw_block(msg, &numOfRows, &pData); -// if(numOfRows == 0) break; -// printf("write data: tbname:%s, numOfRows:%d\n", tmq_get_table_name(msg), numOfRows); -// int ret = taos_write_raw_block(pConn, numOfRows, pData, tmq_get_table_name(msg)); -// printf("write raw data: %s\n", tmq_err2str(ret)); -// } -// } - - taos_close(pConn); -} - -int32_t init_env() { - TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); - if (pConn == NULL) { - return -1; - } - - TAOS_RES* pRes = taos_query(pConn, "drop database if exists db_taosx"); - if (taos_errno(pRes) != 0) { - printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create database if not exists db_taosx vgroups 4"); - if (taos_errno(pRes) != 0) { - printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "drop database if exists abc1"); - if (taos_errno(pRes) != 0) { - printf("error in drop db, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create database if not exists abc1 vgroups 3"); - if (taos_errno(pRes) != 0) { - printf("error in create db, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "use abc1"); - if (taos_errno(pRes) != 0) { - printf("error in use db, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, - "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 " - "nchar(8), t4 bool)"); - if (taos_errno(pRes) != 0) { - printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table if not exists ct0 using st1 tags(1000, \"ttt\", true)"); - if (taos_errno(pRes) != 0) { - printf("failed to create child table tu1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "insert into ct0 values(1626006833600, 1, 2, 'a')"); - if (taos_errno(pRes) != 0) { - printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table if not exists ct1 using st1(t1) tags(2000)"); - if (taos_errno(pRes) != 0) { - printf("failed to create child table ct1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table if not exists ct2 using st1(t1) tags(NULL)"); - if (taos_errno(pRes) != 0) { - printf("failed to create child table ct2, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "insert into ct1 values(1626006833600, 3, 4, 'b')"); - if (taos_errno(pRes) != 0) { - printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table if not exists ct3 using st1(t1) tags(3000)"); - if (taos_errno(pRes) != 0) { - printf("failed to create child table ct3, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833602, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')"); - if (taos_errno(pRes) != 0) { - printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table st1 add column c4 bigint"); - if (taos_errno(pRes) != 0) { - printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table st1 modify column c3 binary(64)"); - if (taos_errno(pRes) != 0) { - printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "insert into ct3 values(1626006833605, 53, 63, 'cffffffffffffffffffffffffffff', 8989898899999) (1626006833609, 51, 62, 'c333', 940)"); - if (taos_errno(pRes) != 0) { - printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "insert into ct3 select * from ct1"); - if (taos_errno(pRes) != 0) { - printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table st1 add tag t2 binary(64)"); - if (taos_errno(pRes) != 0) { - printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table ct3 set tag t1=5000"); - if (taos_errno(pRes) != 0) { - printf("failed to slter child table ct3, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "delete from abc1 .ct3 where ts < 1626006833606"); - if (taos_errno(pRes) != 0) { - printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "drop table ct3 ct1"); - if (taos_errno(pRes) != 0) { - printf("failed to drop child table ct3, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "drop table st1"); - if (taos_errno(pRes) != 0) { - printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table if not exists n1(ts timestamp, c1 int, c2 nchar(4))"); - if (taos_errno(pRes) != 0) { - printf("failed to create normal table n1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table n1 add column c3 bigint"); - if (taos_errno(pRes) != 0) { - printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table n1 modify column c2 nchar(8)"); - if (taos_errno(pRes) != 0) { - printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table n1 rename column c3 cc3"); - if (taos_errno(pRes) != 0) { - printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table n1 comment 'hello'"); - if (taos_errno(pRes) != 0) { - printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "alter table n1 drop column c1"); - if (taos_errno(pRes) != 0) { - printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "insert into n1 values(now, 'eeee', 8989898899999) (now+9s, 'c333', 940)"); - if (taos_errno(pRes) != 0) { - printf("failed to insert into n1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "drop table n1"); - if (taos_errno(pRes) != 0) { - printf("failed to drop normal table n1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table jt(ts timestamp, i int) tags(t json)"); - if (taos_errno(pRes) != 0) { - printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table jt1 using jt tags('{\"k1\":1, \"k2\":\"hello\"}')"); - if (taos_errno(pRes) != 0) { - printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table jt2 using jt tags('')"); - if (taos_errno(pRes) != 0) { - printf("failed to create super table jt2, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, - "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 " - "nchar(8), t4 bool)"); - if (taos_errno(pRes) != 0) { - printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "drop table st1"); - if (taos_errno(pRes) != 0) { - printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - taos_close(pConn); - return 0; -} - -int32_t create_topic() { - printf("create topic\n"); - TAOS_RES* pRes; - TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); - if (pConn == NULL) { - return -1; - } - - pRes = taos_query(pConn, "use abc1"); - if (taos_errno(pRes) != 0) { - printf("error in use db, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1"); - if (taos_errno(pRes) != 0) { - printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - taos_close(pConn); - return 0; -} - -void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { - printf("commit %d tmq %p param %p\n", code, tmq, param); -} - -tmq_t* build_consumer() { -#if 0 - TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); - assert(pConn != NULL); - - TAOS_RES* pRes = taos_query(pConn, "use abc1"); - if (taos_errno(pRes) != 0) { - printf("error in use db, reason:%s\n", taos_errstr(pRes)); - } - taos_free_result(pRes); -#endif - - tmq_conf_t* conf = tmq_conf_new(); - tmq_conf_set(conf, "group.id", "tg2"); - tmq_conf_set(conf, "client.id", "my app 1"); - tmq_conf_set(conf, "td.connect.user", "root"); - tmq_conf_set(conf, "td.connect.pass", "taosdata"); - tmq_conf_set(conf, "msg.with.table.name", "true"); - tmq_conf_set(conf, "enable.auto.commit", "true"); - - /*tmq_conf_set(conf, "experimental.snapshot.enable", "true");*/ - - tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); - tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); - assert(tmq); - tmq_conf_destroy(conf); - return tmq; -} - -tmq_list_t* build_topic_list() { - tmq_list_t* topic_list = tmq_list_new(); - tmq_list_append(topic_list, "topic_ctb_column"); - /*tmq_list_append(topic_list, "tmq_test_db_multi_insert_topic");*/ - return topic_list; -} - -void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) { - int32_t code; - - if ((code = tmq_subscribe(tmq, topics))) { - fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code)); - printf("subscribe err\n"); - return; - } - int32_t cnt = 0; - while (running) { - TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, -1); - if (tmqmessage) { - cnt++; - msg_process(tmqmessage); - /*if (cnt >= 2) break;*/ - /*printf("get data\n");*/ - taos_free_result(tmqmessage); - /*} else {*/ - /*break;*/ - /*tmq_commit_sync(tmq, NULL);*/ - } - } - - code = tmq_consumer_close(tmq); - if (code) - fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code)); - else - fprintf(stderr, "%% Consumer closed\n"); -} - -void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) { - static const int MIN_COMMIT_COUNT = 1; - - int msg_count = 0; - int32_t code; - - if ((code = tmq_subscribe(tmq, topics))) { - fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code)); - return; - } - - tmq_list_t* subList = NULL; - tmq_subscription(tmq, &subList); - char** subTopics = tmq_list_to_c_array(subList); - int32_t sz = tmq_list_get_size(subList); - printf("subscribed topics: "); - for (int32_t i = 0; i < sz; i++) { - printf("%s, ", subTopics[i]); - } - printf("\n"); - tmq_list_destroy(subList); - - while (running) { - TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000); - if (tmqmessage) { - msg_process(tmqmessage); - taos_free_result(tmqmessage); - - /*tmq_commit_sync(tmq, NULL);*/ - /*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/ - } - } - - code = tmq_consumer_close(tmq); - if (code) - fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code)); - else - fprintf(stderr, "%% Consumer closed\n"); -} - -int main(int argc, char* argv[]) { - printf("env init\n"); - if (init_env() < 0) { - return -1; - } - create_topic(); - - tmq_t* tmq = build_consumer(); - tmq_list_t* topic_list = build_topic_list(); - basic_consume_loop(tmq, topic_list); - /*sync_consume_loop(tmq, topic_list);*/ -} diff --git a/examples/nodejs/README-win.md b/examples/nodejs/README-win.md index 75fec69413af2bb49498118ec7235c9947e2f89e..e496be2f87e3ff0fcc01359f23888734669b0c22 100644 --- a/examples/nodejs/README-win.md +++ b/examples/nodejs/README-win.md @@ -35,7 +35,7 @@ Python 2.7.18 下载地址:https://www.taosdata.com/cn/all-downloads/,选择一个合适的windows-client下载(client应该尽量与server端的版本保持一致) -使用client的taos shell连接server +使用client的TDengine CLI连接server ```shell >taos -h node5 diff --git a/examples/rust/.gitignore b/examples/rust/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..96ef6c0b944e24fc22f51f18136cd62ffd5b0b8f --- /dev/null +++ b/examples/rust/.gitignore @@ -0,0 +1,2 @@ +/target +Cargo.lock diff --git a/examples/rust/Cargo.toml b/examples/rust/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..1ed73e2fde68fe06bc49690c8112b1d6a145548a --- /dev/null +++ b/examples/rust/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "rust" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +taos = "*" + +[dev-dependencies] +chrono = "0.4" +itertools = "0.10.3" +pretty_env_logger = "0.4.0" +serde = { version = "1", features = ["derive"] } +serde_json = "1" +tokio = { version = "1", features = ["full"] } +anyhow = "1" diff --git a/examples/rust/examples/bind-tags.rs b/examples/rust/examples/bind-tags.rs new file mode 100644 index 0000000000000000000000000000000000000000..a1f7286625723fbf2501212fe576d29ee83018c6 --- /dev/null +++ b/examples/rust/examples/bind-tags.rs @@ -0,0 +1,80 @@ +use anyhow::Result; +use serde::Deserialize; +use taos::*; + +#[tokio::main] +async fn main() -> Result<()> { + let taos = TaosBuilder::from_dsn("taos://")?.build()?; + taos.exec_many([ + "drop database if exists test", + "create database test keep 36500", + "use test", + "create table tb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, + c6 tinyint unsigned, c7 smallint unsigned, c8 int unsigned, c9 bigint unsigned, + c10 float, c11 double, c12 varchar(100), c13 nchar(100)) tags(t1 varchar(100))", + ]) + .await?; + let mut stmt = Stmt::init(&taos)?; + stmt.prepare( + "insert into ? using tb1 tags(?) values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", + )?; + stmt.set_tbname("d0")?; + stmt.set_tags(&[Value::VarChar("涛思".to_string())])?; + + let params = vec![ + ColumnView::from_millis_timestamp(vec![164000000000]), + ColumnView::from_bools(vec![true]), + ColumnView::from_tiny_ints(vec![i8::MAX]), + ColumnView::from_small_ints(vec![i16::MAX]), + ColumnView::from_ints(vec![i32::MAX]), + ColumnView::from_big_ints(vec![i64::MAX]), + ColumnView::from_unsigned_tiny_ints(vec![u8::MAX]), + ColumnView::from_unsigned_small_ints(vec![u16::MAX]), + ColumnView::from_unsigned_ints(vec![u32::MAX]), + ColumnView::from_unsigned_big_ints(vec![u64::MAX]), + ColumnView::from_floats(vec![f32::MAX]), + ColumnView::from_doubles(vec![f64::MAX]), + ColumnView::from_varchar(vec!["ABC"]), + ColumnView::from_nchar(vec!["涛思数据"]), + ]; + let rows = stmt.bind(¶ms)?.add_batch()?.execute()?; + assert_eq!(rows, 1); + + #[derive(Debug, Deserialize)] + #[allow(dead_code)] + struct Row { + ts: String, + c1: bool, + c2: i8, + c3: i16, + c4: i32, + c5: i64, + c6: u8, + c7: u16, + c8: u32, + c9: u64, + c10: Option, + c11: f64, + c12: String, + c13: String, + t1: serde_json::Value, + } + + let rows: Vec = taos + .query("select * from tb1") + .await? + .deserialize() + .try_collect() + .await?; + let row = &rows[0]; + dbg!(&row); + assert_eq!(row.c5, i64::MAX); + assert_eq!(row.c8, u32::MAX); + assert_eq!(row.c9, u64::MAX); + assert_eq!(row.c10.unwrap(), f32::MAX); + // assert_eq!(row.c11, f64::MAX); + assert_eq!(row.c12, "ABC"); + assert_eq!(row.c13, "涛思数据"); + + Ok(()) +} diff --git a/examples/rust/examples/bind.rs b/examples/rust/examples/bind.rs new file mode 100644 index 0000000000000000000000000000000000000000..194938a319709f9eb6ccdf6d705f023a338fbb49 --- /dev/null +++ b/examples/rust/examples/bind.rs @@ -0,0 +1,74 @@ +use anyhow::Result; +use serde::Deserialize; +use taos::*; + +#[tokio::main] +async fn main() -> Result<()> { + let taos = TaosBuilder::from_dsn("taos://")?.build()?; + taos.exec_many([ + "drop database if exists test_bindable", + "create database test_bindable keep 36500", + "use test_bindable", + "create table tb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, + c6 tinyint unsigned, c7 smallint unsigned, c8 int unsigned, c9 bigint unsigned, + c10 float, c11 double, c12 varchar(100), c13 nchar(100))", + ]) + .await?; + let mut stmt = Stmt::init(&taos)?; + stmt.prepare("insert into tb1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")?; + let params = vec![ + ColumnView::from_millis_timestamp(vec![0]), + ColumnView::from_bools(vec![true]), + ColumnView::from_tiny_ints(vec![i8::MAX]), + ColumnView::from_small_ints(vec![i16::MAX]), + ColumnView::from_ints(vec![i32::MAX]), + ColumnView::from_big_ints(vec![i64::MAX]), + ColumnView::from_unsigned_tiny_ints(vec![u8::MAX]), + ColumnView::from_unsigned_small_ints(vec![u16::MAX]), + ColumnView::from_unsigned_ints(vec![u32::MAX]), + ColumnView::from_unsigned_big_ints(vec![u64::MAX]), + ColumnView::from_floats(vec![f32::MAX]), + ColumnView::from_doubles(vec![f64::MAX]), + ColumnView::from_varchar(vec!["ABC"]), + ColumnView::from_nchar(vec!["涛思数据"]), + ]; + let rows = stmt.bind(¶ms)?.add_batch()?.execute()?; + assert_eq!(rows, 1); + + #[derive(Debug, Deserialize)] + #[allow(dead_code)] + struct Row { + ts: String, + c1: bool, + c2: i8, + c3: i16, + c4: i32, + c5: i64, + c6: u8, + c7: u16, + c8: u32, + c9: u64, + c10: Option, + c11: f64, + c12: String, + c13: String, + } + + let rows: Vec = taos + .query("select * from tb1") + .await? + .deserialize() + .try_collect() + .await?; + let row = &rows[0]; + dbg!(&row); + assert_eq!(row.c5, i64::MAX); + assert_eq!(row.c8, u32::MAX); + assert_eq!(row.c9, u64::MAX); + assert_eq!(row.c10.unwrap(), f32::MAX); + // assert_eq!(row.c11, f64::MAX); + assert_eq!(row.c12, "ABC"); + assert_eq!(row.c13, "涛思数据"); + + Ok(()) +} diff --git a/examples/rust/examples/query.rs b/examples/rust/examples/query.rs new file mode 100644 index 0000000000000000000000000000000000000000..016b291abc95f0c7c9f89d8345b91136df20d20b --- /dev/null +++ b/examples/rust/examples/query.rs @@ -0,0 +1,106 @@ +use std::time::Duration; + +use chrono::{DateTime, Local}; +use taos::*; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let dsn = "taos://"; + + let opts = PoolBuilder::new() + .max_size(5000) // max connections + .max_lifetime(Some(Duration::from_secs(60 * 60))) // lifetime of each connection + .min_idle(Some(1000)) // minimal idle connections + .connection_timeout(Duration::from_secs(2)); + + let pool = TaosBuilder::from_dsn(dsn)?.with_pool_builder(opts)?; + + let taos = pool.get()?; + + let db = "query"; + + // prepare database + taos.exec_many([ + format!("DROP DATABASE IF EXISTS `{db}`"), + format!("CREATE DATABASE `{db}`"), + format!("USE `{db}`"), + ]) + .await?; + + let inserted = taos.exec_many([ + // create super table + "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))", + // create child table + "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')", + // insert into child table + "INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)", + // insert with NULL values + "INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)", + // insert and automatically create table with tags if not exists + "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)", + // insert many records in a single sql + "INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)", + ]).await?; + + assert_eq!(inserted, 6); + loop { + let count: usize = taos + .query_one("select count(*) from `meters`") + .await? + .unwrap_or_default(); + + if count >= 6 { + break; + } else { + println!("waiting for data"); + } + } + + let mut result = taos.query("select tbname, * from `meters`").await?; + + for field in result.fields() { + println!("got field: {}", field.name()); + } + + // Query option 1, use rows stream. + let mut rows = result.rows(); + let mut nrows = 0; + while let Some(row) = rows.try_next().await? { + for (col, (name, value)) in row.enumerate() { + println!( + "[{}] got value in col {} (named `{:>8}`): {}", + nrows, col, name, value + ); + } + nrows += 1; + } + + // Query options 2, use deserialization with serde. + #[derive(Debug, serde::Deserialize)] + #[allow(dead_code)] + struct Record { + tbname: String, + // deserialize timestamp to chrono::DateTime + ts: DateTime, + // float to f32 + current: Option, + // int to i32 + voltage: Option, + phase: Option, + groupid: i32, + // binary/varchar to String + location: String, + } + + let records: Vec = taos + .query("select tbname, * from `meters`") + .await? + .deserialize() + .try_collect() + .await?; + + dbg!(result.summary()); + assert_eq!(records.len(), 6); + dbg!(records); + Ok(()) +} diff --git a/examples/rust/examples/subscribe.rs b/examples/rust/examples/subscribe.rs new file mode 100644 index 0000000000000000000000000000000000000000..9e2e890405f031cbd492fec22dd7aedebdfbb7df --- /dev/null +++ b/examples/rust/examples/subscribe.rs @@ -0,0 +1,103 @@ +use std::time::Duration; + +use chrono::{DateTime, Local}; +use taos::*; + +// Query options 2, use deserialization with serde. +#[derive(Debug, serde::Deserialize)] +#[allow(dead_code)] +struct Record { + // deserialize timestamp to chrono::DateTime + ts: DateTime, + // float to f32 + current: Option, + // int to i32 + voltage: Option, + phase: Option, +} + +async fn prepare(taos: Taos) -> anyhow::Result<()> { + let inserted = taos.exec_many([ + // create child table + "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')", + // insert into child table + "INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)", + // insert with NULL values + "INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)", + // insert and automatically create table with tags if not exists + "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)", + // insert many records in a single sql + "INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)", + ]).await?; + assert_eq!(inserted, 6); + Ok(()) +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + // std::env::set_var("RUST_LOG", "debug"); + pretty_env_logger::init(); + let dsn = "taos://localhost:6030"; + let builder = TaosBuilder::from_dsn(dsn)?; + + let taos = builder.build()?; + let db = "tmq"; + + // prepare database + taos.exec_many([ + "DROP TOPIC IF EXISTS tmq_meters".to_string(), + format!("DROP DATABASE IF EXISTS `{db}`"), + format!("CREATE DATABASE `{db}`"), + format!("USE `{db}`"), + // create super table + "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))".to_string(), + // create topic for subscription + format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}") + ]) + .await?; + + let task = tokio::spawn(prepare(taos)); + + tokio::time::sleep(Duration::from_secs(1)).await; + + // subscribe + let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; + + let mut consumer = tmq.build()?; + consumer.subscribe(["tmq_meters"]).await?; + + { + let mut stream = consumer.stream(); + + while let Some((offset, message)) = stream.try_next().await? { + // get information from offset + + // the topic + let topic = offset.topic(); + // the vgroup id, like partition id in kafka. + let vgroup_id = offset.vgroup_id(); + println!("* in vgroup id {vgroup_id} of topic {topic}\n"); + + if let Some(data) = message.into_data() { + while let Some(block) = data.fetch_raw_block().await? { + // one block for one table, get table name if needed + let name = block.table_name(); + let records: Vec = block.deserialize().try_collect()?; + println!( + "** table: {}, got {} records: {:#?}\n", + name.unwrap(), + records.len(), + records + ); + } + } + consumer.commit(offset).await?; + } + } + + consumer.unsubscribe().await; + + task.await??; + + Ok(()) +} diff --git a/examples/rust/src/main.rs b/examples/rust/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..e7a11a969c037e00a796aafeff6258501ec15e9a --- /dev/null +++ b/examples/rust/src/main.rs @@ -0,0 +1,3 @@ +fn main() { + println!("Hello, world!"); +} diff --git a/include/client/taos.h b/include/client/taos.h index dd7266bd96dcb74bb19837ab470a744925db4e64..49cfbb52b80e88103fe6befc6d2818641e731fcf 100644 --- a/include/client/taos.h +++ b/include/client/taos.h @@ -131,10 +131,10 @@ DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...); DLL_EXPORT setConfRet taos_set_config(const char *config); DLL_EXPORT int taos_init(void); DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); -DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port); -DLL_EXPORT void taos_close(TAOS *taos); +DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port); +DLL_EXPORT void taos_close(TAOS *taos); -const char *taos_data_type(int type); +const char *taos_data_type(int type); DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos); DLL_EXPORT int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length); @@ -244,33 +244,38 @@ DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_comm /* -------------------------TMQ MSG HANDLE INTERFACE---------------------- */ +DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); +DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res); +DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); + +/* ------------------------------ TAOSX -----------------------------------*/ +// note: following apis are unstable enum tmq_res_t { TMQ_RES_INVALID = -1, TMQ_RES_DATA = 1, TMQ_RES_TABLE_META = 2, + TMQ_RES_TAOSX = 3, }; -typedef struct tmq_raw_data{ - void* raw; +typedef struct tmq_raw_data { + void *raw; uint32_t raw_len; uint16_t raw_type; } tmq_raw_data; typedef enum tmq_res_t tmq_res_t; -DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res); -DLL_EXPORT int32_t tmq_get_raw(TAOS_RES *res, tmq_raw_data *raw); -DLL_EXPORT int32_t tmq_write_raw(TAOS *taos, tmq_raw_data raw); -DLL_EXPORT int taos_write_raw_block(TAOS *taos, int numOfRows, char *pData, const char* tbname); -DLL_EXPORT void tmq_free_raw(tmq_raw_data raw); -DLL_EXPORT char *tmq_get_json_meta(TAOS_RES *res); // Returning null means error. Returned result need to be freed by tmq_free_json_meta -DLL_EXPORT void tmq_free_json_meta(char* jsonMeta); -DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); -DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res); -DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); -DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res); - -/* ------------------------------ TMQ END -------------------------------- */ +DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res); +DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res); +DLL_EXPORT int32_t tmq_get_raw(TAOS_RES *res, tmq_raw_data *raw); +DLL_EXPORT int32_t tmq_write_raw(TAOS *taos, tmq_raw_data raw); +DLL_EXPORT int taos_write_raw_block(TAOS *taos, int numOfRows, char *pData, const char *tbname); +DLL_EXPORT void tmq_free_raw(tmq_raw_data raw); +// Returning null means error. Returned result need to be freed by tmq_free_json_meta +DLL_EXPORT char *tmq_get_json_meta(TAOS_RES *res); +DLL_EXPORT void tmq_free_json_meta(char *jsonMeta); + +/* ---------------------------- TAOSX END -------------------------------- */ typedef enum { TSDB_SRV_STATUS_UNAVAILABLE = 0, diff --git a/include/common/systable.h b/include/common/systable.h index ed2e6a46c35006f8f9ffc189a98f3df5e2ac9ade..882c54de952dc044ed30aa6a1aed66145c0db804 100644 --- a/include/common/systable.h +++ b/include/common/systable.h @@ -22,54 +22,58 @@ extern "C" { #ifndef TDENGINE_SYSTABLE_H #define TDENGINE_SYSTABLE_H -#define TSDB_INFORMATION_SCHEMA_DB "information_schema" -#define TSDB_INS_TABLE_DNODES "ins_dnodes" -#define TSDB_INS_TABLE_MNODES "ins_mnodes" -#define TSDB_INS_TABLE_MODULES "ins_modules" -#define TSDB_INS_TABLE_QNODES "ins_qnodes" -#define TSDB_INS_TABLE_BNODES "ins_bnodes" -#define TSDB_INS_TABLE_SNODES "ins_snodes" -#define TSDB_INS_TABLE_CLUSTER "ins_cluster" -#define TSDB_INS_TABLE_DATABASES "ins_databases" -#define TSDB_INS_TABLE_FUNCTIONS "ins_functions" -#define TSDB_INS_TABLE_INDEXES "ins_indexes" -#define TSDB_INS_TABLE_STABLES "ins_stables" -#define TSDB_INS_TABLE_TABLES "ins_tables" -#define TSDB_INS_TABLE_TAGS "ins_tags" -#define TSDB_INS_TABLE_TABLE_DISTRIBUTED "ins_table_distributed" -#define TSDB_INS_TABLE_USERS "ins_users" -#define TSDB_INS_TABLE_LICENCES "ins_grants" -#define TSDB_INS_TABLE_VGROUPS "ins_vgroups" -#define TSDB_INS_TABLE_VNODES "ins_vnodes" -#define TSDB_INS_TABLE_CONFIGS "ins_configs" -#define TSDB_INS_TABLE_DNODE_VARIABLES "ins_dnode_variables" +#define TSDB_INFORMATION_SCHEMA_DB "information_schema" +#define TSDB_INS_TABLE_DNODES "ins_dnodes" +#define TSDB_INS_TABLE_MNODES "ins_mnodes" +#define TSDB_INS_TABLE_MODULES "ins_modules" +#define TSDB_INS_TABLE_QNODES "ins_qnodes" +#define TSDB_INS_TABLE_BNODES "ins_bnodes" +#define TSDB_INS_TABLE_SNODES "ins_snodes" +#define TSDB_INS_TABLE_CLUSTER "ins_cluster" +#define TSDB_INS_TABLE_DATABASES "ins_databases" +#define TSDB_INS_TABLE_FUNCTIONS "ins_functions" +#define TSDB_INS_TABLE_INDEXES "ins_indexes" +#define TSDB_INS_TABLE_STABLES "ins_stables" +#define TSDB_INS_TABLE_TABLES "ins_tables" +#define TSDB_INS_TABLE_TAGS "ins_tags" +#define TSDB_INS_TABLE_TABLE_DISTRIBUTED "ins_table_distributed" +#define TSDB_INS_TABLE_USERS "ins_users" +#define TSDB_INS_TABLE_LICENCES "ins_grants" +#define TSDB_INS_TABLE_VGROUPS "ins_vgroups" +#define TSDB_INS_TABLE_VNODES "ins_vnodes" +#define TSDB_INS_TABLE_CONFIGS "ins_configs" +#define TSDB_INS_TABLE_DNODE_VARIABLES "ins_dnode_variables" +#define TSDB_INS_TABLE_SUBSCRIPTIONS "ins_subscriptions" +#define TSDB_INS_TABLE_TOPICS "ins_topics" +#define TSDB_INS_TABLE_STREAMS "ins_streams" #define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema" #define TSDB_PERFS_TABLE_SMAS "perf_smas" #define TSDB_PERFS_TABLE_CONNECTIONS "perf_connections" #define TSDB_PERFS_TABLE_QUERIES "perf_queries" -#define TSDB_PERFS_TABLE_TOPICS "perf_topics" #define TSDB_PERFS_TABLE_CONSUMERS "perf_consumers" -#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "perf_subscriptions" #define TSDB_PERFS_TABLE_OFFSETS "perf_offsets" #define TSDB_PERFS_TABLE_TRANS "perf_trans" -#define TSDB_PERFS_TABLE_STREAMS "perf_streams" #define TSDB_PERFS_TABLE_APPS "perf_apps" typedef struct SSysDbTableSchema { const char* name; const int32_t type; const int32_t bytes; + const bool sysInfo; } SSysDbTableSchema; typedef struct SSysTableMeta { const char* name; const SSysDbTableSchema* schema; const int32_t colNum; + const bool sysInfo; } SSysTableMeta; void getInfosDbMeta(const SSysTableMeta** pInfosTableMeta, size_t* size); void getPerfDbMeta(const SSysTableMeta** pPerfsTableMeta, size_t* size); +void getVisibleInfosTablesNum(bool sysInfo, size_t* size); +bool invisibleColumn(bool sysInfo, int8_t tableType, int8_t flags); #ifdef __cplusplus } diff --git a/include/common/tcommon.h b/include/common/tcommon.h index e04d9d5e86738d19155402e25c05fbeee6be85bd..891c9ab040cfa6acdff55be1889a2bebe01ec2d3 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -44,11 +44,36 @@ enum { ) // clang-format on +typedef struct { + TSKEY ts; + uint64_t groupId; +} SWinKey; + +static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) { + SWinKey* pWin1 = (SWinKey*)pKey1; + SWinKey* pWin2 = (SWinKey*)pKey2; + + if (pWin1->groupId > pWin2->groupId) { + return 1; + } else if (pWin1->groupId < pWin2->groupId) { + return -1; + } + + if (pWin1->ts > pWin2->ts) { + return 1; + } else if (pWin1->ts < pWin2->ts) { + return -1; + } + + return 0; +} + enum { TMQ_MSG_TYPE__DUMMY = 0, TMQ_MSG_TYPE__POLL_RSP, TMQ_MSG_TYPE__POLL_META_RSP, TMQ_MSG_TYPE__EP_RSP, + TMQ_MSG_TYPE__TAOSX_RSP, TMQ_MSG_TYPE__END_RSP, }; @@ -60,6 +85,7 @@ enum { STREAM_INPUT__DATA_RETRIEVE, STREAM_INPUT__GET_RES, STREAM_INPUT__CHECKPOINT, + STREAM_INPUT__DESTROY, }; typedef enum EStreamType { @@ -104,7 +130,6 @@ typedef struct SDataBlockInfo { uint32_t capacity; // TODO: optimize and remove following int64_t version; // used for stream, and need serialization - int64_t ts; // used for stream, and need serialization int32_t childId; // used for stream, do not serialize EStreamType type; // used for stream, do not serialize STimeWindow calWin; // used for stream, do not serialize @@ -159,6 +184,7 @@ typedef struct SQueryTableDataCond { STimeWindow twindows; int64_t startVersion; int64_t endVersion; + int64_t schemaVersion; } SQueryTableDataCond; int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock); @@ -180,7 +206,7 @@ typedef struct SColumn { int16_t slotId; char name[TSDB_COL_NAME_LEN]; - int8_t flag; // column type: normal column, tag, or user-input column (integer/float/string) + int16_t colType; // column type: normal column, tag, or window column int16_t type; int32_t bytes; uint8_t precision; diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h index 3679b3773b2c9a89147e162b568479cfb44912db..df16f4f0ab9ad1a79c11ede9e54fdc086e9204df 100644 --- a/include/common/tdataformat.h +++ b/include/common/tdataformat.h @@ -38,22 +38,18 @@ typedef struct STagVal STagVal; typedef struct STag STag; // bitmap -#define N1(n) ((1 << (n)) - 1) -#define BIT1_SIZE(n) (((n)-1) / 8 + 1) -#define BIT2_SIZE(n) (((n)-1) / 4 + 1) -#define SET_BIT1(p, i, v) \ - do { \ - (p)[(i) / 8] &= N1((i) % 8); \ - (p)[(i) / 8] |= (((uint8_t)(v)) << (((i) % 8))); \ - } while (0) - -#define GET_BIT1(p, i) (((p)[(i) / 8] >> ((i) % 8)) & ((uint8_t)1)) -#define SET_BIT2(p, i, v) \ - do { \ - p[(i) / 4] &= N1((i) % 4 * 2); \ - (p)[(i) / 4] |= (((uint8_t)(v)) << (((i) % 4) * 2)); \ - } while (0) -#define GET_BIT2(p, i) (((p)[(i) / 4] >> (((i) % 4) * 2)) & ((uint8_t)3)) +const static uint8_t BIT2_MAP[4][4] = {{0b00000000, 0b00000001, 0b00000010, 0}, + {0b00000000, 0b00000100, 0b00001000, 2}, + {0b00000000, 0b00010000, 0b00100000, 4}, + {0b00000000, 0b01000000, 0b10000000, 6}}; + +#define N1(n) ((((uint8_t)1) << (n)) - 1) +#define BIT1_SIZE(n) ((((n)-1) >> 3) + 1) +#define BIT2_SIZE(n) ((((n)-1) >> 2) + 1) +#define SET_BIT1(p, i, v) ((p)[(i) >> 3] = (p)[(i) >> 3] & N1((i)&7) | (((uint8_t)(v)) << ((i)&7))) +#define GET_BIT1(p, i) (((p)[(i) >> 3] >> ((i)&7)) & ((uint8_t)1)) +#define SET_BIT2(p, i, v) ((p)[(i) >> 2] = (p)[(i) >> 2] & N1(BIT2_MAP[(i)&3][3]) | BIT2_MAP[(i)&3][(v)]) +#define GET_BIT2(p, i) (((p)[(i) >> 2] >> BIT2_MAP[(i)&3][3]) & ((uint8_t)3)) // STSchema int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t nCols, STSchema **ppTSchema); @@ -100,6 +96,7 @@ char *tTagValToData(const STagVal *pTagVal, bool isJson); int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag); int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag); int32_t tTagToValArray(const STag *pTag, SArray **ppArray); +void tTagSetCid(const STag *pTag, int16_t iTag, int16_t cid); void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf); @@ -171,7 +168,7 @@ struct SColVal { #pragma pack(push, 1) struct STagVal { -// char colName[TSDB_COL_NAME_LEN]; // only used for tmq_get_meta + // char colName[TSDB_COL_NAME_LEN]; // only used for tmq_get_meta union { int16_t cid; char *pKey; diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 9111728e1ad15d7cfc105a5a65ee8364f7ab2f95..f72cb3d6d98b1f4a5e3db20257dfa8aa77e56747 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -66,6 +66,7 @@ extern int32_t tsNumOfVnodeStreamThreads; extern int32_t tsNumOfVnodeFetchThreads; extern int32_t tsNumOfVnodeWriteThreads; extern int32_t tsNumOfVnodeSyncThreads; +extern int32_t tsNumOfVnodeRsmaThreads; extern int32_t tsNumOfQnodeQueryThreads; extern int32_t tsNumOfQnodeFetchThreads; extern int32_t tsNumOfSnodeSharedThreads; @@ -130,6 +131,7 @@ extern int32_t tsMqRebalanceInterval; extern int32_t tsTtlUnit; extern int32_t tsTtlPushInterval; extern int32_t tsGrantHBInterval; +extern int32_t tsUptimeInterval; #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) @@ -139,12 +141,11 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile bool tsc); void taosCleanupCfg(); void taosCfgDynamicOptions(const char *option, const char *value); -void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary); struct SConfig *taosGetCfg(); -void taosSetAllDebugFlag(int32_t flag); -void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal); +void taosSetAllDebugFlag(int32_t flag, bool rewrite); +void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite); int32_t taosSetCfg(SConfig *pCfg, char *name); void taosLocalCfgForbiddenToChange(char* name, bool* forbidden); diff --git a/include/common/tmsg.h b/include/common/tmsg.h index cc15d4ed6b89e32f38862bc5a9955dc8f2502772..41bd11d34749ebe8d6e77aa13aa82cbfe0b8a90e 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -268,14 +268,41 @@ STSRow* tGetSubmitBlkNext(SSubmitBlkIter* pIter); // for debug int32_t tPrintFixedSchemaSubmitReq(SSubmitReq* pReq, STSchema* pSchema); +struct SSchema { + int8_t type; + int8_t flags; + col_id_t colId; + int32_t bytes; + char name[TSDB_COL_NAME_LEN]; +}; + typedef struct { - int32_t code; - int8_t hashMeta; - int64_t uid; - char* tblFName; - int32_t numOfRows; - int32_t affectedRows; - int64_t sver; + char tbName[TSDB_TABLE_NAME_LEN]; + char stbName[TSDB_TABLE_NAME_LEN]; + char dbFName[TSDB_DB_FNAME_LEN]; + int64_t dbId; + int32_t numOfTags; + int32_t numOfColumns; + int8_t precision; + int8_t tableType; + int32_t sversion; + int32_t tversion; + uint64_t suid; + uint64_t tuid; + int32_t vgId; + int8_t sysInfo; + SSchema* pSchemas; +} STableMetaRsp; + +typedef struct { + int32_t code; + int8_t hashMeta; + int64_t uid; + char* tblFName; + int32_t numOfRows; + int32_t affectedRows; + int64_t sver; + STableMetaRsp* pMeta; } SSubmitBlkRsp; typedef struct { @@ -290,19 +317,14 @@ typedef struct { int32_t tEncodeSSubmitRsp(SEncoder* pEncoder, const SSubmitRsp* pRsp); int32_t tDecodeSSubmitRsp(SDecoder* pDecoder, SSubmitRsp* pRsp); +void tFreeSSubmitBlkRsp(void* param); void tFreeSSubmitRsp(SSubmitRsp* pRsp); -#define COL_SMA_ON ((int8_t)0x1) -#define COL_IDX_ON ((int8_t)0x2) -#define COL_SET_NULL ((int8_t)0x10) -#define COL_SET_VAL ((int8_t)0x20) -struct SSchema { - int8_t type; - int8_t flags; - col_id_t colId; - int32_t bytes; - char name[TSDB_COL_NAME_LEN]; -}; +#define COL_SMA_ON ((int8_t)0x1) +#define COL_IDX_ON ((int8_t)0x2) +#define COL_SET_NULL ((int8_t)0x10) +#define COL_SET_VAL ((int8_t)0x20) +#define COL_IS_SYSINFO ((int8_t)0x40) #define COL_IS_SET(FLG) (((FLG) & (COL_SET_VAL | COL_SET_NULL)) != 0) #define COL_CLR_SET(FLG) ((FLG) &= (~(COL_SET_VAL | COL_SET_NULL))) @@ -472,6 +494,14 @@ int32_t tSerializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq int32_t tDeserializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq); void tFreeSMCreateStbReq(SMCreateStbReq* pReq); +typedef struct { + STableMetaRsp* pMeta; +} SMCreateStbRsp; + +int32_t tEncodeSMCreateStbRsp(SEncoder* pEncoder, const SMCreateStbRsp* pRsp); +int32_t tDecodeSMCreateStbRsp(SDecoder* pDecoder, SMCreateStbRsp* pRsp); +void tFreeSMCreateStbRsp(SMCreateStbRsp* pRsp); + typedef struct { char name[TSDB_TABLE_FNAME_LEN]; int8_t igNotExists; @@ -530,6 +560,7 @@ typedef struct { uint32_t connId; int32_t dnodeNum; int8_t superUser; + int8_t sysInfo; int8_t connType; SEpSet epSet; int32_t svrTimestamp; @@ -1239,23 +1270,6 @@ typedef struct { SVgroupInfo vgroups[]; } SVgroupsInfo; -typedef struct { - char tbName[TSDB_TABLE_NAME_LEN]; - char stbName[TSDB_TABLE_NAME_LEN]; - char dbFName[TSDB_DB_FNAME_LEN]; - int64_t dbId; - int32_t numOfTags; - int32_t numOfColumns; - int8_t precision; - int8_t tableType; - int32_t sversion; - int32_t tversion; - uint64_t suid; - uint64_t tuid; - int32_t vgId; - SSchema* pSchemas; -} STableMetaRsp; - typedef struct { STableMetaRsp* pMeta; } SMAlterStbRsp; @@ -1266,7 +1280,7 @@ void tFreeSMAlterStbRsp(SMAlterStbRsp* pRsp); int32_t tSerializeSTableMetaRsp(void* buf, int32_t bufLen, STableMetaRsp* pRsp); int32_t tDeserializeSTableMetaRsp(void* buf, int32_t bufLen, STableMetaRsp* pRsp); -void tFreeSTableMetaRsp(STableMetaRsp* pRsp); +void tFreeSTableMetaRsp(void* pRsp); void tFreeSTableIndexRsp(void* info); typedef struct { @@ -2028,11 +2042,13 @@ int tEncodeSVCreateTbBatchReq(SEncoder* pCoder, const SVCreateTbBatchReq* pReq); int tDecodeSVCreateTbBatchReq(SDecoder* pCoder, SVCreateTbBatchReq* pReq); typedef struct { - int32_t code; + int32_t code; + STableMetaRsp* pMeta; } SVCreateTbRsp, SVUpdateTbRsp; -int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp); -int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp); +int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp); +int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp); +void tFreeSVCreateTbRsp(void* param); int32_t tSerializeSVCreateTbReq(void** buf, SVCreateTbReq* pReq); void* tDeserializeSVCreateTbReq(void* buf, SVCreateTbReq* pReq); @@ -2555,10 +2571,14 @@ typedef struct { char topic[TSDB_TOPIC_FNAME_LEN]; int64_t ntbUid; SArray* colIdList; // SArray -} SCheckAlterInfo; +} STqCheckInfo; + +int32_t tEncodeSTqCheckInfo(SEncoder* pEncoder, const STqCheckInfo* pInfo); +int32_t tDecodeSTqCheckInfo(SDecoder* pDecoder, STqCheckInfo* pInfo); -int32_t tEncodeSCheckAlterInfo(SEncoder* pEncoder, const SCheckAlterInfo* pInfo); -int32_t tDecodeSCheckAlterInfo(SDecoder* pDecoder, SCheckAlterInfo* pInfo); +typedef struct { + char topic[TSDB_TOPIC_FNAME_LEN]; +} STqDelCheckInfoReq; typedef struct { int32_t vgId; @@ -2594,7 +2614,7 @@ enum { typedef struct { int8_t type; union { - // snapshot data + // snapshot struct { int64_t uid; int64_t ts; @@ -2606,6 +2626,22 @@ typedef struct { }; } STqOffsetVal; +static FORCE_INLINE void tqOffsetResetToData(STqOffsetVal* pOffsetVal, int64_t uid, int64_t ts) { + pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_DATA; + pOffsetVal->uid = uid; + pOffsetVal->ts = ts; +} + +static FORCE_INLINE void tqOffsetResetToMeta(STqOffsetVal* pOffsetVal, int64_t uid) { + pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_META; + pOffsetVal->uid = uid; +} + +static FORCE_INLINE void tqOffsetResetToLog(STqOffsetVal* pOffsetVal, int64_t ver) { + pOffsetVal->type = TMQ_OFFSET__LOG; + pOffsetVal->version = ver; +} + int32_t tEncodeSTqOffsetVal(SEncoder* pEncoder, const STqOffsetVal* pOffsetVal); int32_t tDecodeSTqOffsetVal(SDecoder* pDecoder, STqOffsetVal* pOffsetVal); int32_t tFormatOffset(char* buf, int32_t maxLen, const STqOffsetVal* pVal); @@ -2654,36 +2690,6 @@ typedef struct { int32_t tSerializeSMDropSmaReq(void* buf, int32_t bufLen, SMDropSmaReq* pReq); int32_t tDeserializeSMDropSmaReq(void* buf, int32_t bufLen, SMDropSmaReq* pReq); -typedef struct { - int32_t vgId; - SEpSet epSet; -} SVgEpSet; - -typedef struct { - int64_t suid; - int8_t level; -} SRSmaFetchMsg; - -static FORCE_INLINE int32_t tEncodeSRSmaFetchMsg(SEncoder* pCoder, const SRSmaFetchMsg* pReq) { - if (tStartEncode(pCoder) < 0) return -1; - - if (tEncodeI64(pCoder, pReq->suid) < 0) return -1; - if (tEncodeI8(pCoder, pReq->level) < 0) return -1; - - tEndEncode(pCoder); - return 0; -} - -static FORCE_INLINE int32_t tDecodeSRSmaFetchMsg(SDecoder* pCoder, SRSmaFetchMsg* pReq) { - if (tStartDecode(pCoder) < 0) return -1; - - if (tDecodeI64(pCoder, &pReq->suid) < 0) return -1; - if (tDecodeI8(pCoder, &pReq->level) < 0) return -1; - - tEndDecode(pCoder); - return 0; -} - typedef struct { int8_t version; // for compatibility(default 0) int8_t intervalUnit; // MACRO: TIME_UNIT_XXX @@ -2943,33 +2949,14 @@ static FORCE_INLINE void tDeleteSMqSubTopicEp(SMqSubTopicEp* pSubTopicEp) { typedef struct { SMqRspHead head; - int64_t reqOffset; - int64_t rspOffset; - STqOffsetVal reqOffsetNew; - STqOffsetVal rspOffsetNew; + STqOffsetVal rspOffset; int16_t resMsgType; int32_t metaRspLen; void* metaRsp; } SMqMetaRsp; -static FORCE_INLINE int32_t tEncodeSMqMetaRsp(void** buf, const SMqMetaRsp* pRsp) { - int32_t tlen = 0; - tlen += taosEncodeFixedI64(buf, pRsp->reqOffset); - tlen += taosEncodeFixedI64(buf, pRsp->rspOffset); - tlen += taosEncodeFixedI16(buf, pRsp->resMsgType); - tlen += taosEncodeFixedI32(buf, pRsp->metaRspLen); - tlen += taosEncodeBinary(buf, pRsp->metaRsp, pRsp->metaRspLen); - return tlen; -} - -static FORCE_INLINE void* tDecodeSMqMetaRsp(const void* buf, SMqMetaRsp* pRsp) { - buf = taosDecodeFixedI64(buf, &pRsp->reqOffset); - buf = taosDecodeFixedI64(buf, &pRsp->rspOffset); - buf = taosDecodeFixedI16(buf, &pRsp->resMsgType); - buf = taosDecodeFixedI32(buf, &pRsp->metaRspLen); - buf = taosDecodeBinary(buf, &pRsp->metaRsp, pRsp->metaRspLen); - return (void*)buf; -} +int32_t tEncodeSMqMetaRsp(SEncoder* pEncoder, const SMqMetaRsp* pRsp); +int32_t tDecodeSMqMetaRsp(SDecoder* pDecoder, SMqMetaRsp* pRsp); typedef struct { SMqRspHead head; @@ -2986,6 +2973,26 @@ typedef struct { int32_t tEncodeSMqDataRsp(SEncoder* pEncoder, const SMqDataRsp* pRsp); int32_t tDecodeSMqDataRsp(SDecoder* pDecoder, SMqDataRsp* pRsp); +void tDeleteSMqDataRsp(SMqDataRsp* pRsp); + +typedef struct { + SMqRspHead head; + STqOffsetVal reqOffset; + STqOffsetVal rspOffset; + int32_t blockNum; + int8_t withTbName; + int8_t withSchema; + SArray* blockDataLen; + SArray* blockData; + SArray* blockTbName; + SArray* blockSchema; + int32_t createTableNum; + SArray* createTableLen; + SArray* createTableReq; +} STaosxRsp; + +int32_t tEncodeSTaosxRsp(SEncoder* pEncoder, const STaosxRsp* pRsp); +int32_t tDecodeSTaosxRsp(SDecoder* pDecoder, STaosxRsp* pRsp); typedef struct { SMqRspHead head; diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 6462c7afbfa80a48219d64cbed3797317f705c16..006ba7f21bf0177c2b0104a51ef7908785cced2d 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -170,6 +170,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_MND_SPLIT_VGROUP, "split-vgroup", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_SHOW_VARIABLES, "show-variables", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_SERVER_VERSION, "server-version", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_UPTIME_TIMER, "uptime-timer", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL) TD_NEW_MSG_SEG(TDMT_VND_MSG) @@ -188,7 +189,8 @@ enum { TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_CHANGE, "vnode-mq-vg-change", SMqRebVgReq, SMqRebVgRsp) TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_DELETE, "vnode-mq-vg-delete", SMqVDeleteReq, SMqVDeleteRsp) TD_DEF_MSG_TYPE(TDMT_VND_MQ_COMMIT_OFFSET, "vnode-commit-offset", STqOffset, STqOffset) - TD_DEF_MSG_TYPE(TDMT_VND_CHECK_ALTER_INFO, "vnode-alter-check-info", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_ADD_CHECK_INFO, "vnode-add-check-info", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_DELETE_CHECK_INFO, "vnode-delete-check-info", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_CREATE_TOPIC, "vnode-create-topic", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_ALTER_TOPIC, "vnode-alter-topic", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_DROP_TOPIC, "vnode-drop-topic", NULL, NULL) @@ -200,7 +202,8 @@ enum { TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT_RSMA, "vnode-submit-rsma", SSubmitReq, SSubmitRsp) - TD_DEF_MSG_TYPE(TDMT_VND_FETCH_RSMA, "vnode-fetch-rsma", SRSmaFetchMsg, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_FETCH_RSMA, "vnode-fetch-rsma", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_EXEC_RSMA, "vnode-exec-rsma", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_DELETE, "delete-data", SVDeleteReq, SVDeleteRsp) TD_DEF_MSG_TYPE(TDMT_VND_BATCH_DEL, "batch-delete", SBatchDeleteReq, NULL) TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIG, "alter-config", NULL, NULL) diff --git a/include/libs/command/command.h b/include/libs/command/command.h index 8a4ecad37da3089c32ff0e3fca7473dcc334971c..b3339a417ba463212c3abc163b57519194953c10 100644 --- a/include/libs/command/command.h +++ b/include/libs/command/command.h @@ -17,12 +17,12 @@ #define TDENGINE_COMMAND_H #include "cmdnodes.h" -#include "tmsg.h" #include "plannodes.h" +#include "tmsg.h" typedef struct SExplainCtx SExplainCtx; -int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp); +int32_t qExecCommand(bool sysInfoUser, SNode *pStmt, SRetrieveTableRsp **pRsp); int32_t qExecStaticExplain(SQueryPlan *pDag, SRetrieveTableRsp **pRsp); int32_t qExecExplainBegin(SQueryPlan *pDag, SExplainCtx **pCtx, int64_t startTs); diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index a64815f14fe0a0dbe5b85ffd0969a68d43f50d8e..25a6221fcb5344cd1f0d98af15840b3905321612 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -29,7 +29,7 @@ typedef void* DataSinkHandle; struct SRpcMsg; struct SSubplan; -typedef struct SReadHandle { +typedef struct { void* tqReader; void* meta; void* config; @@ -41,6 +41,10 @@ typedef struct SReadHandle { bool initTableReader; bool initTqReader; int32_t numOfVgroups; + + void* sContext; // SSnapContext* + + void* pStateBackend; } SReadHandle; // in queue mode, data streams are seperated by msg @@ -78,8 +82,8 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO /** * @brief Cleanup SSDataBlock for StreamScanInfo - * - * @param tinfo + * + * @param tinfo */ void tdCleanupStreamInputDataBlock(qTaskInfo_t tinfo); @@ -163,7 +167,7 @@ int32_t qGetQualifiedTableIdList(void* pTableList, const char* tagCond, int32_t void qProcessRspMsg(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet); -int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList/*,int32_t* resNum, SExplainExecInfo** pRes*/); +int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList /*,int32_t* resNum, SExplainExecInfo** pRes*/); int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len); @@ -180,11 +184,17 @@ int32_t qGetStreamScanStatus(qTaskInfo_t tinfo, uint64_t* uid, int64_t* ts); int32_t qStreamPrepareTsdbScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts); -int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset); +int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType); int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset); -void* qStreamExtractMetaMsg(qTaskInfo_t tinfo); +SMqMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo); + +int64_t qStreamExtractPrepareUid(qTaskInfo_t tinfo); + +const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo); + +const char* qExtractTbnameFromTask(qTaskInfo_t tinfo); void* qExtractReaderFromStreamScanner(void* scanner); diff --git a/include/libs/function/function.h b/include/libs/function/function.h index e708a2c42d237e1f911ef8db994e94965f9877dd..3f26eee86ad3f1b4666c55283ad346f60a7b4f31 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -92,6 +92,8 @@ struct SResultRowEntryInfo; //for selectivity query, the corresponding tag value is assigned if the data is qualified typedef struct SSubsidiaryResInfo { int16_t num; + int32_t rowLen; + char* buf; // serialize data buffer struct SqlFunctionCtx **pCtx; } SSubsidiaryResInfo; @@ -118,6 +120,11 @@ typedef struct SInputColumnInfoData { uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions. } SInputColumnInfoData; +typedef struct SSerializeDataHandle { + struct SDiskbasedBuf* pBuf; + int32_t currentPage; +} SSerializeDataHandle; + // sql function runtime context typedef struct SqlFunctionCtx { SInputColumnInfoData input; @@ -137,11 +144,10 @@ typedef struct SqlFunctionCtx { SFuncExecFuncs fpSet; SScalarFuncExecFuncs sfp; struct SExprInfo *pExpr; - struct SDiskbasedBuf *pBuf; struct SSDataBlock *pSrcBlock; - struct SSDataBlock *pDstBlock; // used by indifinite rows function to set selectivity - int32_t curBufPage; - bool increase; + struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity + SSerializeDataHandle saveHandle; + bool isStream; char udfName[TSDB_FUNC_NAME_LEN]; } SqlFunctionCtx; diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h index 741b0fddebf36cd1a8f16d0d2265742bcb9ac16c..c9c19579cb1c6943c5914aebed20668a1c1ff156 100644 --- a/include/libs/function/functionMgt.h +++ b/include/libs/function/functionMgt.h @@ -176,7 +176,8 @@ int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen); EFuncReturnRows fmGetFuncReturnRows(SFunctionNode* pFunc); -bool fmIsBuiltinFunc(const char* pFunc); +bool fmIsBuiltinFunc(const char* pFunc); +EFunctionType fmGetFuncType(const char* pFunc); bool fmIsAggFunc(int32_t funcId); bool fmIsScalarFunc(int32_t funcId); diff --git a/include/libs/function/taosudf.h b/include/libs/function/taosudf.h index 5e84b87a81ec1808dfc368ac285f4dabd2e1d57e..2b2063e3f61e575cd59de099feee3b83ad87ff9c 100644 --- a/include/libs/function/taosudf.h +++ b/include/libs/function/taosudf.h @@ -256,8 +256,9 @@ static FORCE_INLINE int32_t udfColDataSet(SUdfColumn* pColumn, uint32_t currentR typedef int32_t (*TUdfScalarProcFunc)(SUdfDataBlock* block, SUdfColumn *resultCol); typedef int32_t (*TUdfAggStartFunc)(SUdfInterBuf *buf); -typedef int32_t (*TUdfAggProcessFunc)(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf); -typedef int32_t (*TUdfAggFinishFunc)(SUdfInterBuf* buf, SUdfInterBuf *resultData); +typedef int32_t (*TUdfAggProcessFunc)(SUdfDataBlock *block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf); +typedef int32_t (*TUdfAggMergeFunc)(SUdfInterBuf *inputBuf1, SUdfInterBuf *inputBuf2, SUdfInterBuf *outputBuf); +typedef int32_t (*TUdfAggFinishFunc)(SUdfInterBuf *buf, SUdfInterBuf *resultData); #ifdef __cplusplus } diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h index bb75efa00ac23f163e9e2eec94df0560e78fb463..5743d3360857dab460841d89e50360ba53d36b39 100644 --- a/include/libs/nodes/nodes.h +++ b/include/libs/nodes/nodes.h @@ -105,7 +105,7 @@ typedef enum ENodeType { QUERY_NODE_COLUMN_REF, // Statement nodes are used in parser and planner module. - QUERY_NODE_SET_OPERATOR, + QUERY_NODE_SET_OPERATOR = 100, QUERY_NODE_SELECT_STMT, QUERY_NODE_VNODE_MODIF_STMT, QUERY_NODE_CREATE_DATABASE_STMT, @@ -198,7 +198,7 @@ typedef enum ENodeType { QUERY_NODE_QUERY, // logic plan node - QUERY_NODE_LOGIC_PLAN_SCAN, + QUERY_NODE_LOGIC_PLAN_SCAN = 1000, QUERY_NODE_LOGIC_PLAN_JOIN, QUERY_NODE_LOGIC_PLAN_AGG, QUERY_NODE_LOGIC_PLAN_PROJECT, @@ -215,7 +215,7 @@ typedef enum ENodeType { QUERY_NODE_LOGIC_PLAN, // physical plan node - QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN, + QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN = 1100, QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN, QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN, QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN, diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 8661baceb2dc426e69e459aec33c6c730b419e7e..6fd6a316ebd1bd6b0dcdb9b0b222716cfa33203e 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -317,6 +317,7 @@ typedef struct SSystemTableScanPhysiNode { SEpSet mgmtEpSet; bool showRewrite; int32_t accountId; + bool sysInfo; } SSystemTableScanPhysiNode; typedef struct STableScanPhysiNode { diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index 088da73a1ad1785322ebc069d8e932e6287c9fc6..3a1eaf289e4ba245544b985e893f746845c37c88 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -57,7 +57,9 @@ typedef enum EColumnType { COLUMN_TYPE_COLUMN = 1, COLUMN_TYPE_TAG, COLUMN_TYPE_TBNAME, - COLUMN_TYPE_WINDOW_PC, + COLUMN_TYPE_WINDOW_START, + COLUMN_TYPE_WINDOW_END, + COLUMN_TYPE_WINDOW_DURATION, COLUMN_TYPE_GROUP_KEY } EColumnType; @@ -276,6 +278,7 @@ typedef struct SSelectStmt { bool hasLastRowFunc; bool hasTimeLineFunc; bool hasUdaf; + bool hasStateKey; bool onlyHasKeepOrderFunc; bool groupSort; } SSelectStmt; @@ -428,6 +431,9 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal); char* nodesGetFillModeString(EFillMode mode); int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc); +const char* operatorTypeStr(EOperatorType type); +const char* logicConditionTypeStr(ELogicConditionType type); + #ifdef __cplusplus } #endif diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h index a3de9164a2d4418dd1edc8493d46c20f2fafdcac..95bde858640b3d4cd5df616bc1d0a5a65795d8f3 100644 --- a/include/libs/parser/parser.h +++ b/include/libs/parser/parser.h @@ -49,10 +49,13 @@ typedef struct SParseContext { SStmtCallback* pStmtCb; const char* pUser; bool isSuperUser; + bool enableSysInfo; bool async; int8_t schemalessType; const char* svrVer; bool nodeOffline; + SArray* pTableMetaPos; // sql table pos => catalog data pos + SArray* pTableVgroupPos; // sql table pos => catalog data pos } SParseContext; int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery); @@ -84,8 +87,8 @@ int32_t qBindStmtSingleColValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBu int32_t rowNum); int32_t qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD_E** fields); int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields); -int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName, TAOS_MULTI_BIND* bind, - char* msgBuf, int32_t msgBufLen); +int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName, + TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen); void destroyBoundColumnInfo(void* pBoundInfo); int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf, int32_t msgBufLen); diff --git a/include/libs/planner/planner.h b/include/libs/planner/planner.h index d1a5c5db103d940c9e36dd9ad637461b2e3361b5..05caa7a7bb56617ef34c03e3646f85ac98f65a56 100644 --- a/include/libs/planner/planner.h +++ b/include/libs/planner/planner.h @@ -38,6 +38,7 @@ typedef struct SPlanContext { char* pMsg; int32_t msgLen; const char* pUser; + bool sysInfo; } SPlanContext; // Create the physical plan for the query, according to the AST. diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h index 34d870397f953e9a85a9a5b44b6a4fc57c0594c9..1fa7dca7dc6ad975e87e18570c8a9a35d990bb7e 100644 --- a/include/libs/qcom/query.h +++ b/include/libs/qcom/query.h @@ -215,6 +215,7 @@ void initQueryModuleMsgHandle(); const SSchema* tGetTbnameColumnSchema(); bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags); +int32_t queryCreateCTableMetaFromMsg(STableMetaRsp *msg, SCTableMeta *pMeta); int32_t queryCreateTableMetaFromMsg(STableMetaRsp* msg, bool isSuperTable, STableMeta** pMeta); char* jobTaskStatusStr(int32_t status); diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index f51c37ed47505710043d4590c744fd8dff783916..2c275090080f73577cd28b3e10b3f1e102b4556e 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -53,6 +53,7 @@ enum { TASK_SCHED_STATUS__WAITING, TASK_SCHED_STATUS__ACTIVE, TASK_SCHED_STATUS__FAILED, + TASK_SCHED_STATUS__DROPPING, }; enum { @@ -127,6 +128,10 @@ typedef struct { int8_t type; } SStreamCheckpoint; +typedef struct { + int8_t type; +} SStreamTaskDestroy; + typedef struct { int8_t type; SSDataBlock* pBlock; @@ -211,7 +216,6 @@ typedef struct { void* vnode; FTbSink* tbSinkFunc; STSchema* pTSchema; - SHashObj* pHash; // groupId to tbuid } STaskSinkTb; typedef void FSmaSink(void* vnode, int64_t smaId, const SArray* data); @@ -259,6 +263,14 @@ typedef struct { SArray* checkpointVer; } SStreamRecoveringState; +// incremental state storage +typedef struct { + SStreamTask* pOwner; + TDB* db; + TTB* pStateDb; + TXN txn; +} SStreamState; + typedef struct SStreamTask { int64_t streamId; int32_t taskId; @@ -275,12 +287,8 @@ typedef struct SStreamTask { int32_t nodeId; SEpSet epSet; - // used for task source and sink, - // while task agg should have processedVer for each child int64_t recoverSnapVer; int64_t startVer; - int64_t checkpointVer; - int64_t processedVer; // children info SArray* childEpInfo; // SArray @@ -312,6 +320,10 @@ typedef struct SStreamTask { // msg handle SMsgCb* pMsgCb; + + // state backend + SStreamState* pState; + } SStreamTask; int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo); @@ -507,7 +519,7 @@ typedef struct SStreamMeta { char* path; TDB* db; TTB* pTaskDb; - TTB* pStateDb; + TTB* pCheckpointDb; SHashObj* pTasks; SHashObj* pRecoverStatus; void* ahandle; @@ -519,7 +531,7 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF void streamMetaClose(SStreamMeta* streamMeta); int32_t streamMetaAddTask(SStreamMeta* pMeta, SStreamTask* pTask); -int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, char* msg, int32_t msgLen); +int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* msg, int32_t msgLen); int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId); SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId); @@ -528,6 +540,37 @@ int32_t streamMetaCommit(SStreamMeta* pMeta); int32_t streamMetaRollBack(SStreamMeta* pMeta); int32_t streamLoadTasks(SStreamMeta* pMeta); +SStreamState* streamStateOpen(char* path, SStreamTask* pTask); +void streamStateClose(SStreamState* pState); +int32_t streamStateBegin(SStreamState* pState); +int32_t streamStateCommit(SStreamState* pState); +int32_t streamStateAbort(SStreamState* pState); + +typedef struct { + TBC* pCur; +} SStreamStateCur; + +#if 1 +int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); +int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); +int32_t streamStateDel(SStreamState* pState, const SWinKey* key); +void streamFreeVal(void* val); + +SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key); +SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key); +SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key); +void streamStateFreeCur(SStreamStateCur* pCur); + +int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); + +int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur); +int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur); + +int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur); +int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur); + +#endif + #ifdef __cplusplus } #endif diff --git a/include/libs/stream/tstreamUpdate.h b/include/libs/stream/tstreamUpdate.h index 78543118da026cf8558e9f9208689b3bf5cbac75..c186430f3ff3123bf39b38fd61ec81008e8a69c2 100644 --- a/include/libs/stream/tstreamUpdate.h +++ b/include/libs/stream/tstreamUpdate.h @@ -25,33 +25,34 @@ extern "C" { #endif typedef struct SUpdateInfo { - SArray *pTsBuckets; - uint64_t numBuckets; - SArray *pTsSBFs; - uint64_t numSBFs; - int64_t interval; - int64_t watermark; - TSKEY minTS; - SScalableBf* pCloseWinSBF; - SHashObj* pMap; - STimeWindow scanWindow; - uint64_t scanGroupId; - uint64_t maxVersion; + SArray *pTsBuckets; + uint64_t numBuckets; + SArray *pTsSBFs; + uint64_t numSBFs; + int64_t interval; + int64_t watermark; + TSKEY minTS; + SScalableBf *pCloseWinSBF; + SHashObj *pMap; + STimeWindow scanWindow; + uint64_t scanGroupId; + uint64_t maxVersion; } SUpdateInfo; -SUpdateInfo *updateInfoInitP(SInterval* pInterval, int64_t watermark); +SUpdateInfo *updateInfoInitP(SInterval *pInterval, int64_t watermark); SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark); -bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts); -void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, uint64_t version); -bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, uint64_t version); -void updateInfoDestroy(SUpdateInfo *pInfo); -void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo); -void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo); -int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo); -int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo); +bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts); +bool updateInfoIsTableInserted(SUpdateInfo *pInfo, int64_t tbUid); +void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version); +bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version); +void updateInfoDestroy(SUpdateInfo *pInfo); +void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo); +void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo); +int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo); +int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo); #ifdef __cplusplus } #endif -#endif /* ifndef _TSTREAMUPDATE_H_ */ \ No newline at end of file +#endif /* ifndef _TSTREAMUPDATE_H_ */ diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index aa563343f8d860ba80ec243a1b594a6545a61ff7..e6a4dd1d493969a333005a64f515ba35dde34573 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -26,10 +26,15 @@ extern "C" { extern bool gRaftDetailLog; -#define SYNC_RESP_TTL_MS 10000000 -#define SYNC_SPEED_UP_HB_TIMER 400 -#define SYNC_SPEED_UP_AFTER_MS (1000 * 20) -#define SYNC_SLOW_DOWN_RANGE 100 +#define SYNC_RESP_TTL_MS 10000000 +#define SYNC_SPEED_UP_HB_TIMER 400 +#define SYNC_SPEED_UP_AFTER_MS (1000 * 20) +#define SYNC_SLOW_DOWN_RANGE 100 +#define SYNC_MAX_READ_RANGE 2 +#define SYNC_MAX_PROGRESS_WAIT_MS 4000 +#define SYNC_MAX_START_TIME_RANGE_MS (1000 * 20) +#define SYNC_MAX_RECV_TIME_RANGE_MS 1200 +#define SYNC_ADD_QUORUM_COUNT 3 #define SYNC_MAX_BATCH_SIZE 1 #define SYNC_INDEX_BEGIN 0 @@ -210,9 +215,12 @@ void syncStop(int64_t rid); int32_t syncSetStandby(int64_t rid); ESyncState syncGetMyRole(int64_t rid); bool syncIsReady(int64_t rid); +bool syncIsReadyForRead(int64_t rid); const char* syncGetMyRoleStr(int64_t rid); bool syncRestoreFinish(int64_t rid); SyncTerm syncGetMyTerm(int64_t rid); +SyncIndex syncGetLastIndex(int64_t rid); +SyncIndex syncGetCommitIndex(int64_t rid); SyncGroupId syncGetVgId(int64_t rid); void syncGetEpSet(int64_t rid, SEpSet* pEpSet); void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet); diff --git a/include/libs/sync/syncTools.h b/include/libs/sync/syncTools.h index cd2c2d4a4f8b708246786fa1ea0cf030d5799593..6c95c3c6d72929045bd780056811c1938864717b 100644 --- a/include/libs/sync/syncTools.h +++ b/include/libs/sync/syncTools.h @@ -423,6 +423,7 @@ typedef struct SyncAppendEntriesReply { SyncTerm privateTerm; bool success; SyncIndex matchIndex; + int64_t startTime; } SyncAppendEntriesReply; SyncAppendEntriesReply* syncAppendEntriesReplyBuild(int32_t vgId); diff --git a/include/os/os.h b/include/os/os.h index b036002f8adb5d246db8346112f2189f779f73cd..71966061a19a175d816010ff6425b4004b1f2223 100644 --- a/include/os/os.h +++ b/include/os/os.h @@ -79,6 +79,7 @@ extern "C" { #include #include +#include "taoserror.h" #include "osAtomic.h" #include "osDef.h" #include "osDir.h" diff --git a/include/os/osSemaphore.h b/include/os/osSemaphore.h index 7fca20d75e2eaece441656bc4ae2c707e0b15cd3..e52da96f0170d4d67d9fb8fa3aeff7270223e2d3 100644 --- a/include/os/osSemaphore.h +++ b/include/os/osSemaphore.h @@ -23,10 +23,9 @@ extern "C" { #include #if defined(_TD_DARWIN_64) - +#include // typedef struct tsem_s *tsem_t; -typedef struct bosal_sem_t *tsem_t; - +typedef dispatch_semaphore_t tsem_t; int tsem_init(tsem_t *sem, int pshared, unsigned int value); int tsem_wait(tsem_t *sem); diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 3ca6978156bb99d40245bd89c09981786c3b8d46..e39172d74e52e852f0fa1812634e494d61ac6213 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -49,7 +49,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_RPC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x0019) //common & util -#define TSDB_CODE_TIME_UNSYNCED TAOS_DEF_ERROR_CODE(0, 0x0013) +#define TSDB_CODE_TIME_UNSYNCED TAOS_DEF_ERROR_CODE(0, 0x0013) #define TSDB_CODE_APP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0014) #define TSDB_CODE_OPS_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x0100) @@ -222,7 +222,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_INVALID_DB_OPTION TAOS_DEF_ERROR_CODE(0, 0x0382) #define TSDB_CODE_MND_INVALID_DB TAOS_DEF_ERROR_CODE(0, 0x0383) #define TSDB_CODE_MND_TOO_MANY_DATABASES TAOS_DEF_ERROR_CODE(0, 0x0385) -#define TSDB_CODE_MND_DB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0388) +#define TSDB_CODE_MND_DB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0388) #define TSDB_CODE_MND_INVALID_DB_ACCT TAOS_DEF_ERROR_CODE(0, 0x0389) #define TSDB_CODE_MND_DB_OPTION_UNCHANGED TAOS_DEF_ERROR_CODE(0, 0x038A) #define TSDB_CODE_MND_DB_INDEX_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x038B) @@ -291,6 +291,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_STREAM_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x03F1) #define TSDB_CODE_MND_INVALID_STREAM_OPTION TAOS_DEF_ERROR_CODE(0, 0x03F2) #define TSDB_CODE_MND_STREAM_MUST_BE_DELETED TAOS_DEF_ERROR_CODE(0, 0x03F3) +#define TSDB_CODE_MND_STREAM_TASK_DROPPED TAOS_DEF_ERROR_CODE(0, 0x03F4) // mnode-sma #define TSDB_CODE_MND_SMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0480) @@ -432,7 +433,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TQ_NO_DISK_PERMISSIONS TAOS_DEF_ERROR_CODE(0, 0x0A03) #define TSDB_CODE_TQ_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x0A04) #define TSDB_CODE_TQ_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0A05) -#define TSDB_CODE_TQ_FILE_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x0A06) +#define TSDB_CODE_TQ_FILE_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x0A06) #define TSDB_CODE_TQ_FAILED_TO_CREATE_DIR TAOS_DEF_ERROR_CODE(0, 0x0A07) #define TSDB_CODE_TQ_META_NO_SUCH_KEY TAOS_DEF_ERROR_CODE(0, 0x0A08) #define TSDB_CODE_TQ_META_KEY_NOT_IN_TXN TAOS_DEF_ERROR_CODE(0, 0x0A09) @@ -489,7 +490,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_PAR_WRONG_NUMBER_OF_SELECT TAOS_DEF_ERROR_CODE(0, 0x2609) #define TSDB_CODE_PAR_GROUPBY_LACK_EXPRESSION TAOS_DEF_ERROR_CODE(0, 0x260A) #define TSDB_CODE_PAR_NOT_SELECTED_EXPRESSION TAOS_DEF_ERROR_CODE(0, 0x260B) -#define TSDB_CODE_PAR_NOT_SINGLE_GROUP TAOS_DEF_ERROR_CODE(0, 0x260C) +#define TSDB_CODE_PAR_NOT_SINGLE_GROUP TAOS_DEF_ERROR_CODE(0, 0x260C) #define TSDB_CODE_PAR_TAGS_NOT_MATCHED TAOS_DEF_ERROR_CODE(0, 0x260D) #define TSDB_CODE_PAR_INVALID_TAG_NAME TAOS_DEF_ERROR_CODE(0, 0x260E) #define TSDB_CODE_PAR_NAME_OR_PASSWD_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x2610) @@ -614,6 +615,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_RSMA_REMOVE_EXISTS TAOS_DEF_ERROR_CODE(0, 0x3154) #define TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x3155) #define TSDB_CODE_RSMA_EMPTY_INFO TAOS_DEF_ERROR_CODE(0, 0x3156) +#define TSDB_CODE_RSMA_INVALID_SCHEMA TAOS_DEF_ERROR_CODE(0, 0x3157) //index #define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200) @@ -622,6 +624,7 @@ int32_t* taosGetErrno(); //tmq #define TSDB_CODE_TMQ_INVALID_MSG TAOS_DEF_ERROR_CODE(0, 0x4000) #define TSDB_CODE_TMQ_CONSUMER_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x4001) +#define TSDB_CODE_TMQ_CONSUMER_CLOSED TAOS_DEF_ERROR_CODE(0, 0x4002) #ifdef __cplusplus } diff --git a/include/util/tcompare.h b/include/util/tcompare.h index cc9e8ae4641138be528830e17467dab7897f0166..c7a3ca20f222c7d919460b31e9f3c55a79325f46 100644 --- a/include/util/tcompare.h +++ b/include/util/tcompare.h @@ -105,6 +105,97 @@ int32_t compareStrPatternNotMatch(const void *pLeft, const void *pRight); int32_t compareWStrPatternMatch(const void *pLeft, const void *pRight); int32_t compareWStrPatternNotMatch(const void *pLeft, const void *pRight); +int32_t compareInt8Int16(const void *pLeft, const void *pRight); +int32_t compareInt8Int32(const void *pLeft, const void *pRight); +int32_t compareInt8Int64(const void *pLeft, const void *pRight); +int32_t compareInt8Float(const void *pLeft, const void *pRight); +int32_t compareInt8Double(const void *pLeft, const void *pRight); +int32_t compareInt8Uint8(const void *pLeft, const void *pRight); +int32_t compareInt8Uint16(const void *pLeft, const void *pRight); +int32_t compareInt8Uint32(const void *pLeft, const void *pRight); +int32_t compareInt8Uint64(const void *pLeft, const void *pRight); +int32_t compareInt16Int8(const void *pLeft, const void *pRight); +int32_t compareInt16Int32(const void *pLeft, const void *pRight); +int32_t compareInt16Int64(const void *pLeft, const void *pRight); +int32_t compareInt16Float(const void *pLeft, const void *pRight); +int32_t compareInt16Double(const void *pLeft, const void *pRight); +int32_t compareInt16Uint8(const void *pLeft, const void *pRight); +int32_t compareInt16Uint16(const void *pLeft, const void *pRight); +int32_t compareInt16Uint32(const void *pLeft, const void *pRight); +int32_t compareInt16Uint64(const void *pLeft, const void *pRight); +int32_t compareInt32Int8(const void *pLeft, const void *pRight); +int32_t compareInt32Int16(const void *pLeft, const void *pRight); +int32_t compareInt32Int64(const void *pLeft, const void *pRight); +int32_t compareInt32Float(const void *pLeft, const void *pRight); +int32_t compareInt32Double(const void *pLeft, const void *pRight); +int32_t compareInt32Uint8(const void *pLeft, const void *pRight); +int32_t compareInt32Uint16(const void *pLeft, const void *pRight); +int32_t compareInt32Uint32(const void *pLeft, const void *pRight); +int32_t compareInt32Uint64(const void *pLeft, const void *pRight); +int32_t compareInt64Int8(const void *pLeft, const void *pRight); +int32_t compareInt64Int16(const void *pLeft, const void *pRight); +int32_t compareInt64Int32(const void *pLeft, const void *pRight); +int32_t compareInt64Float(const void *pLeft, const void *pRight); +int32_t compareInt64Double(const void *pLeft, const void *pRight); +int32_t compareInt64Uint8(const void *pLeft, const void *pRight); +int32_t compareInt64Uint16(const void *pLeft, const void *pRight); +int32_t compareInt64Uint32(const void *pLeft, const void *pRight); +int32_t compareInt64Uint64(const void *pLeft, const void *pRight); +int32_t compareFloatInt8(const void *pLeft, const void *pRight); +int32_t compareFloatInt16(const void *pLeft, const void *pRight); +int32_t compareFloatInt32(const void *pLeft, const void *pRight); +int32_t compareFloatInt64(const void *pLeft, const void *pRight); +int32_t compareFloatDouble(const void *pLeft, const void *pRight); +int32_t compareFloatUint8(const void *pLeft, const void *pRight); +int32_t compareFloatUint16(const void *pLeft, const void *pRight); +int32_t compareFloatUint32(const void *pLeft, const void *pRight); +int32_t compareFloatUint64(const void *pLeft, const void *pRight); +int32_t compareDoubleInt8(const void *pLeft, const void *pRight); +int32_t compareDoubleInt16(const void *pLeft, const void *pRight); +int32_t compareDoubleInt32(const void *pLeft, const void *pRight); +int32_t compareDoubleInt64(const void *pLeft, const void *pRight); +int32_t compareDoubleFloat(const void *pLeft, const void *pRight); +int32_t compareDoubleUint8(const void *pLeft, const void *pRight); +int32_t compareDoubleUint16(const void *pLeft, const void *pRight); +int32_t compareDoubleUint32(const void *pLeft, const void *pRight); +int32_t compareDoubleUint64(const void *pLeft, const void *pRight); +int32_t compareUint8Int8(const void *pLeft, const void *pRight); +int32_t compareUint8Int16(const void *pLeft, const void *pRight); +int32_t compareUint8Int32(const void *pLeft, const void *pRight); +int32_t compareUint8Int64(const void *pLeft, const void *pRight); +int32_t compareUint8Float(const void *pLeft, const void *pRight); +int32_t compareUint8Double(const void *pLeft, const void *pRight); +int32_t compareUint8Uint16(const void *pLeft, const void *pRight); +int32_t compareUint8Uint32(const void *pLeft, const void *pRight); +int32_t compareUint8Uint64(const void *pLeft, const void *pRight); +int32_t compareUint16Int8(const void *pLeft, const void *pRight); +int32_t compareUint16Int16(const void *pLeft, const void *pRight); +int32_t compareUint16Int32(const void *pLeft, const void *pRight); +int32_t compareUint16Int64(const void *pLeft, const void *pRight); +int32_t compareUint16Float(const void *pLeft, const void *pRight); +int32_t compareUint16Double(const void *pLeft, const void *pRight); +int32_t compareUint16Uint8(const void *pLeft, const void *pRight); +int32_t compareUint16Uint32(const void *pLeft, const void *pRight); +int32_t compareUint16Uint64(const void *pLeft, const void *pRight); +int32_t compareUint32Int8(const void *pLeft, const void *pRight); +int32_t compareUint32Int16(const void *pLeft, const void *pRight); +int32_t compareUint32Int32(const void *pLeft, const void *pRight); +int32_t compareUint32Int64(const void *pLeft, const void *pRight); +int32_t compareUint32Float(const void *pLeft, const void *pRight); +int32_t compareUint32Double(const void *pLeft, const void *pRight); +int32_t compareUint32Uint8(const void *pLeft, const void *pRight); +int32_t compareUint32Uint16(const void *pLeft, const void *pRight); +int32_t compareUint32Uint64(const void *pLeft, const void *pRight); +int32_t compareUint64Int8(const void *pLeft, const void *pRight); +int32_t compareUint64Int16(const void *pLeft, const void *pRight); +int32_t compareUint64Int32(const void *pLeft, const void *pRight); +int32_t compareUint64Int64(const void *pLeft, const void *pRight); +int32_t compareUint64Float(const void *pLeft, const void *pRight); +int32_t compareUint64Double(const void *pLeft, const void *pRight); +int32_t compareUint64Uint8(const void *pLeft, const void *pRight); +int32_t compareUint64Uint16(const void *pLeft, const void *pRight); +int32_t compareUint64Uint32(const void *pLeft, const void *pRight); + __compar_fn_t getComparFunc(int32_t type, int32_t optr); __compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order); int32_t doCompare(const char *a, const char *b, int32_t type, size_t size); diff --git a/include/util/tdef.h b/include/util/tdef.h index a3deb73fd4b666872204e64093565c460e7d2548..2bc821b8736edf745a30e0e103734e4e7b7b31e4 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -132,15 +132,14 @@ typedef enum EOperatorType { OP_TYPE_DIV, OP_TYPE_REM, // unary arithmetic operator - OP_TYPE_MINUS, - OP_TYPE_ASSIGN, + OP_TYPE_MINUS = 20, // bitwise operator - OP_TYPE_BIT_AND, + OP_TYPE_BIT_AND = 30, OP_TYPE_BIT_OR, // binary comparison operator - OP_TYPE_GREATER_THAN, + OP_TYPE_GREATER_THAN = 40, OP_TYPE_GREATER_EQUAL, OP_TYPE_LOWER_THAN, OP_TYPE_LOWER_EQUAL, @@ -153,7 +152,7 @@ typedef enum EOperatorType { OP_TYPE_MATCH, OP_TYPE_NMATCH, // unary comparison operator - OP_TYPE_IS_NULL, + OP_TYPE_IS_NULL = 100, OP_TYPE_IS_NOT_NULL, OP_TYPE_IS_TRUE, OP_TYPE_IS_FALSE, @@ -163,8 +162,11 @@ typedef enum EOperatorType { OP_TYPE_IS_NOT_UNKNOWN, // json operator - OP_TYPE_JSON_GET_VALUE, - OP_TYPE_JSON_CONTAINS + OP_TYPE_JSON_GET_VALUE = 150, + OP_TYPE_JSON_CONTAINS, + + // internal operator + OP_TYPE_ASSIGN = 200 } EOperatorType; #define OP_TYPE_CALC_MAX OP_TYPE_BIT_OR @@ -384,7 +386,7 @@ typedef enum ELogicConditionType { #define TSDB_DEFAULT_EXPLAIN_VERBOSE false -#define TSDB_EXPLAIN_RESULT_ROW_SIZE 512 +#define TSDB_EXPLAIN_RESULT_ROW_SIZE (16*1024) #define TSDB_EXPLAIN_RESULT_COLUMN_NAME "QUERY_PLAN" #define TSDB_MAX_FIELD_LEN 16384 diff --git a/include/util/tencode.h b/include/util/tencode.h index ad642cd612db4d1bb31f57b7a49d977e90978ee5..a6dd58297e8c1dba644d86eb5145b273406fbf9e 100644 --- a/include/util/tencode.h +++ b/include/util/tencode.h @@ -264,12 +264,14 @@ static FORCE_INLINE int32_t tEncodeDouble(SEncoder* pCoder, double val) { static FORCE_INLINE int32_t tEncodeBinary(SEncoder* pCoder, const uint8_t* val, uint32_t len) { if (tEncodeU32v(pCoder, len) < 0) return -1; - if (pCoder->data) { - if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, len)) return -1; - memcpy(TD_CODER_CURRENT(pCoder), val, len); - } + if (len) { + if (pCoder->data) { + if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, len)) return -1; + memcpy(TD_CODER_CURRENT(pCoder), val, len); + } - TD_CODER_MOVE_POS(pCoder, len); + TD_CODER_MOVE_POS(pCoder, len); + } return 0; } @@ -414,14 +416,18 @@ static int32_t tDecodeCStrTo(SDecoder* pCoder, char* val) { static FORCE_INLINE int32_t tDecodeBinaryAlloc(SDecoder* pCoder, void** val, uint64_t* len) { uint64_t length = 0; if (tDecodeU64v(pCoder, &length) < 0) return -1; - if (len) *len = length; + if (length) { + if (len) *len = length; - if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1; - *val = taosMemoryMalloc(length); - if (*val == NULL) return -1; - memcpy(*val, TD_CODER_CURRENT(pCoder), length); + if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1; + *val = taosMemoryMalloc(length); + if (*val == NULL) return -1; + memcpy(*val, TD_CODER_CURRENT(pCoder), length); - TD_CODER_MOVE_POS(pCoder, length); + TD_CODER_MOVE_POS(pCoder, length); + } else { + *val = NULL; + } return 0; } diff --git a/include/util/thash.h b/include/util/thash.h index 781c22a56aaba0d449d1f711b32fe4bd75a39003..f4d09eb0906b04bfd40d97c39ec66feb3b1967a1 100644 --- a/include/util/thash.h +++ b/include/util/thash.h @@ -210,6 +210,8 @@ void taosHashSetEqualFp(SHashObj *pHashObj, _equal_fn_t fp); */ void taosHashSetFreeFp(SHashObj *pHashObj, _hash_free_fn_t fp); +int64_t taosHashGetCompTimes(SHashObj *pHashObj); + #ifdef __cplusplus } #endif diff --git a/include/util/tpagedbuf.h b/include/util/tpagedbuf.h index ef266068cbaff046ec6ebcf0bf02d0b44ee9d3a2..9ab89273e6895c2ea322fa116c06332a431028bc 100644 --- a/include/util/tpagedbuf.h +++ b/include/util/tpagedbuf.h @@ -58,19 +58,17 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem /** * * @param pBuf - * @param groupId * @param pageId * @return */ -void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId); +void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId); /** * * @param pBuf - * @param groupId * @return */ -SIDList getDataBufPagesIdList(SDiskbasedBuf* pBuf, int32_t groupId); +SIDList getDataBufPagesIdList(SDiskbasedBuf* pBuf); /** * get the specified buffer page by id @@ -101,13 +99,6 @@ void releaseBufPageInfo(SDiskbasedBuf* pBuf, struct SPageInfo* pi); */ size_t getTotalBufSize(const SDiskbasedBuf* pBuf); -/** - * get the number of groups in the result buffer - * @param pBuf - * @return - */ -size_t getNumOfBufGroupId(const SDiskbasedBuf* pBuf); - /** * destroy result buffer * @param pBuf diff --git a/include/util/tqueue.h b/include/util/tqueue.h index 0f4f1db9eee4c1b57e25464cd947c1c96218fbec..da409a90bb96c2b19ad081c4599a9fa75de1ad4e 100644 --- a/include/util/tqueue.h +++ b/include/util/tqueue.h @@ -76,6 +76,7 @@ void taosFreeQall(STaosQall *qall); int32_t taosReadAllQitems(STaosQueue *queue, STaosQall *qall); int32_t taosGetQitem(STaosQall *qall, void **ppItem); void taosResetQitems(STaosQall *qall); +int32_t taosQallItemSize(STaosQall *qall); STaosQset *taosOpenQset(); void taosCloseQset(STaosQset *qset); diff --git a/include/util/tref.h b/include/util/tref.h index 7e08bb045b08f56afe59436d07576b6e7604c12c..c2cc54cb07ac3167941061b475f8811e460a3b91 100644 --- a/include/util/tref.h +++ b/include/util/tref.h @@ -29,11 +29,11 @@ int32_t taosOpenRef(int32_t max, void (*fp)(void *)); // close the reference set, refId is the return value by taosOpenRef // return 0 if success. On error, -1 is returned, and terrno is set appropriately -int32_t taosCloseRef(int32_t refId); +int32_t taosCloseRef(int32_t rsetId); // add ref, p is the pointer to resource or pointer ID // return Reference ID(rid) allocated. On error, -1 is returned, and terrno is set appropriately -int64_t taosAddRef(int32_t refId, void *p); +int64_t taosAddRef(int32_t rsetId, void *p); // remove ref, rid is the reference ID returned by taosAddRef // return 0 if success. On error, -1 is returned, and terrno is set appropriately diff --git a/packaging/MPtestJenkinsfile b/packaging/MPtestJenkinsfile new file mode 100644 index 0000000000000000000000000000000000000000..45c8d8abf24877d30af67cb0b34151278612f57c --- /dev/null +++ b/packaging/MPtestJenkinsfile @@ -0,0 +1,202 @@ +def sync_source(branch_name) { + sh ''' + hostname + ip addr|grep 192|awk '{print $2}'|sed "s/\\/.*//" + echo ''' + branch_name + ''' + ''' + sh ''' + cd ${TDINTERNAL_ROOT_DIR} + git reset --hard + git fetch || git fetch + git checkout ''' + branch_name + ''' -f + git branch + git pull || git pull + git log | head -n 20 + cd ${TDENGINE_ROOT_DIR} + git reset --hard + git fetch || git fetch + rm -rf examples/rust/ + git checkout ''' + branch_name + ''' -f + git branch + git pull || git pull + git log | head -n 20 + git submodule update --init --recursive + ''' + return 1 +} +def run_test() { + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + + ''' + sh ''' + export LD_LIBRARY_PATH=${TDINTERNAL_ROOT_DIR}/debug/build/lib + ./fulltest.sh + ''' + sh ''' + cd ${TDENGINE_ROOT_DIR}/tests + ./test-all.sh b1fq + ''' +} +def build_run() { + sync_source("${BRANCH_NAME}") +} +pipeline { + agent none + parameters { + string ( + name:'version', + defaultValue:'3.0.0.1', + description: 'release version number,eg: 3.0.0.1 or 3.0.0.' + ) + string ( + name:'baseVersion', + defaultValue:'3.0.0.1', + description: 'This number of baseVerison is generally not modified.Now it is 3.0.0.1' + ) + string ( + name:'toolsVersion', + defaultValue:'2.1.2', + description: 'This number of baseVerison is generally not modified.Now it is 3.0.0.1' + ) + string ( + name:'toolsBaseVersion', + defaultValue:'2.1.2', + description: 'This number of baseVerison is generally not modified.Now it is 3.0.0.1' + ) + } + environment{ + WORK_DIR = '/var/lib/jenkins/workspace' + TDINTERNAL_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal' + TDENGINE_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal/community' + BRANCH_NAME = '3.0' + + TD_SERVER_TAR = "TDengine-server-${version}-Linux-x64.tar.gz" + BASE_TD_SERVER_TAR = "TDengine-server-${baseVersion}-Linux-x64.tar.gz" + + TD_SERVER_ARM_TAR = "TDengine-server-${version}-Linux-arm64.tar.gz" + BASE_TD_SERVER_ARM_TAR = "TDengine-server-${baseVersion}-Linux-arm64.tar.gz" + + TD_SERVER_LITE_TAR = "TDengine-server-${version}-Linux-x64-Lite.tar.gz" + BASE_TD_SERVER_LITE_TAR = "TDengine-server-${baseVersion}-Linux-x64-Lite.tar.gz" + + TD_CLIENT_TAR = "TDengine-client-${version}-Linux-x64.tar.gz" + BASE_TD_CLIENT_TAR = "TDengine-client-${baseVersion}-Linux-x64.tar.gz" + + TD_CLIENT_ARM_TAR = "TDengine-client-${version}-Linux-arm64.tar.gz" + BASE_TD_CLIENT_ARM_TAR = "TDengine-client-${baseVersion}-Linux-arm64.tar.gz" + + TD_CLIENT_LITE_TAR = "TDengine-client-${version}-Linux-x64-Lite.tar.gz" + BASE_TD_CLIENT_LITE_TAR = "TDengine-client-${baseVersion}-Linux-x64-Lite.tar.gz" + + TD_SERVER_RPM = "TDengine-server-${version}-Linux-x64.rpm" + + TD_SERVER_DEB = "TDengine-server-${version}-Linux-x64.deb" + + TD_SERVER_EXE = "TDengine-server-${version}-Windows-x64.exe" + + TD_CLIENT_EXE = "TDengine-client-${version}-Windows-x64.exe" + + TD_TOOLS_TAR = "taosTools-${toolsVersion}-Linux-x64.tar.gz" + + + } + stages { + stage ('RUN') { + parallel { + stage('ubuntu16') { + agent{label " ubuntu16 "} + steps { + timeout(time: 30, unit: 'MINUTES'){ + sync_source("${BRANCH_NAME}") + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server + python3 checkPackageRuning.py + ''' + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server + python3 checkPackageRuning.py + ''' + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server + python3 checkPackageRuning.py + ''' + } + } + } + stage('ubuntu18') { + agent{label " ubuntu18 "} + steps { + timeout(time: 30, unit: 'MINUTES'){ + sync_source("${BRANCH_NAME}") + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server + python3 checkPackageRuning.py + ''' + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server + python3 checkPackageRuning.py + ''' + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_DEB} ${baseVersion} server + python3 checkPackageRuning.py + ''' + } + } + } + stage('centos7') { + agent{label " centos7_9 "} + steps { + timeout(time: 30, unit: 'MINUTES'){ + sync_source("${BRANCH_NAME}") + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server + python3 checkPackageRuning.py + ''' + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server + python3 checkPackageRuning.py + ''' + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server + python3 checkPackageRuning.py + ''' + } + } + } + stage('centos8') { + agent{label " centos8_3 "} + steps { + timeout(time: 30, unit: 'MINUTES'){ + sync_source("${BRANCH_NAME}") + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server + python3 checkPackageRuning.py + ''' + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server + python3 checkPackageRuning.py + ''' + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging + bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server + python3 checkPackageRuning.py + ''' + } + } + } + } + } + } +} \ No newline at end of file diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index 7d77a0b23e70782f1a8a0160812820c91640f9dc..87f465fdb93ddbff8973430b11ecadc13878069d 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -5,217 +5,97 @@ # # ######################################################## -# first fully qualified domain name (FQDN) for TDengine system +# The end point of the first dnode in the cluster to be connected to when `taosd` or `taos` is started # firstEp hostname:6030 -# local fully qualified domain name (FQDN) +# The end point of the second dnode to be connected to if the firstEp is not available when `taosd` or `taos` is started +# secondEp + +# The FQDN of the host where `taosd` will be started. It can be IP address # fqdn hostname -# first port number for the connection (12 continuous UDP/TCP port number are used) +# The port for external access after `taosd` is started # serverPort 6030 -# log file's directory +# The maximum number of connections a dnode can accept +# maxShellConns 5000 + +# The directory for writing log files # logDir /var/log/taos -# data file's directory +# All data files are stored in this directory # dataDir /var/lib/taos # temporary file's directory # tempDir /tmp/ -# the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only -# arbitrator arbitrator_hostname:6042 - -# number of threads per CPU core -# numOfThreadsPerCore 1.0 - -# number of threads to commit cache data -# numOfCommitThreads 4 - -# the proportion of total CPU cores available for query processing -# 2.0: the query threads will be set to double of the CPU cores. -# 1.0: all CPU cores are available for query processing [default]. -# 0.5: only half of the CPU cores are available for query. -# 0.0: only one core available. -# ratioOfQueryCores 1.0 - -# the last_row/first/last aggregator will not change the original column name in the result fields -keepColumnName 1 - -# number of management nodes in the system -# numOfMnodes 1 - -# enable/disable backuping vnode directory when removing vnode -# vnodeBak 1 - -# enable/disable installation / usage report +# Switch for allowing TDengine to collect and report service usage information # telemetryReporting 1 -# enable/disable load balancing -# balance 1 - -# role for dnode. 0 - any, 1 - mnode, 2 - dnode -# role 0 - -# max timer control blocks -# maxTmrCtrl 512 - -# time interval of system monitor, seconds -# monitorInterval 30 +# The maximum number of vnodes supported by dnode +# supportVnodes 0 -# number of seconds allowed for a dnode to be offline, for cluster only -# offlineThreshold 864000 - -# RPC re-try timer, millisecond -# rpcTimer 300 - -# RPC maximum time for ack, seconds. -# rpcMaxTime 600 - -# time interval of dnode status reporting to mnode, seconds, for cluster only +# The interval of dnode reporting status to mnode # statusInterval 1 -# time interval of heart beat from shell to dnode, seconds +# The interval for TDengine CLI to send heartbeat to mnode # shellActivityTimer 3 -# minimum sliding window time, milli-second +# The minimum sliding window time, milli-second # minSlidingTime 10 -# minimum time window, milli-second +# The minimum time window, milli-second # minIntervalTime 10 -# maximum delay before launching a stream computation, milli-second -# maxStreamCompDelay 20000 - -# maximum delay before launching a stream computation for the first time, milli-second -# maxFirstStreamCompDelay 10000 - -# retry delay when a stream computation fails, milli-second -# retryStreamCompDelay 10 - -# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9 -# streamCompDelayRatio 0.1 - -# max number of vgroups per db, 0 means configured automatically -# maxVgroupsPerDb 0 - -# max number of tables per vnode -# maxTablesPerVnode 1000000 - -# cache block size (Mbyte) -# cache 16 - -# number of cache blocks per vnode -# blocks 6 - -# number of days per DB file -# days 10 - -# number of days to keep DB file -# keep 3650 - -# minimum rows of records in file block -# minRows 100 - -# maximum rows of records in file block -# maxRows 4096 - -# the number of acknowledgments required for successful data writing -# quorum 1 - -# enable/disable compression -# comp 2 - -# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync -# walLevel 1 - -# if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away -# fsync 3000 - -# number of replications, for cluster only -# replica 1 +# The maximum allowed query buffer size in MB during query processing for each data node +# -1 no limit (default) +# 0 no query allowed, queries are disabled +# queryBufferSize -1 -# the compressed rpc message, option: +# The compressed rpc message, option: # -1 (no compression) # 0 (all message compressed), # > 0 (rpc message body which larger than this value will be compressed) -# compressMsgSize -1 +# compressMsgSize -1 # query retrieved column data compression option: # -1 (no compression) # 0 (all retrieved column data compressed), # > 0 (any retrieved column size greater than this value all data will be compressed.) -# compressColData -1 - -# max length of an SQL -# maxSQLLength 65480 - -# max length of WildCards -# maxWildCardsLength 100 - -# the maximum number of records allowed for super table time sorting -# maxNumOfOrderedRes 100000 +# compressColData -1 # system time zone # timezone Asia/Shanghai (CST, +0800) # system time zone (for windows 10) -# timezone UTC-8 +# timezone UTC-8 # system locale -# locale en_US.UTF-8 +# locale en_US.UTF-8 # default system charset -# charset UTF-8 - -# max number of connections allowed in dnode -# maxShellConns 5000 - -# max number of connections allowed in client -# maxConnections 5000 +# charset UTF-8 # stop writing logs when the disk size of the log folder is less than this value -# minimalLogDirGB 1.0 +# minimalLogDirGB 1.0 # stop writing temporary files when the disk size of the tmp folder is less than this value -# minimalTmpDirGB 1.0 +# minimalTmpDirGB 1.0 # if disk free space is less than this value, taosd service exit directly within startup process -# minimalDataDirGB 2.0 - -# One mnode is equal to the number of vnode consumed -# mnodeEqualVnodeNum 4 - -# enbale/disable http service -# http 1 +# minimalDataDirGB 2.0 # enable/disable system monitor -# monitor 1 - -# enable/disable recording the SQL statements via restful interface -# httpEnableRecordSql 0 - -# number of threads used to process http requests -# httpMaxThreads 2 - -# maximum number of rows returned by the restful interface -# restfulRowLimit 10240 - -# database name must be specified in restful interface if the following parameter is set, off by default -# httpDbNameMandatory 1 - -# http keep alive, default is 30 seconds -# httpKeepAlive 30000 +# monitor 1 # The following parameter is used to limit the maximum number of lines in log files. # max number of lines per log filters -# numOfLogLines 10000000 +# numOfLogLines 10000000 # enable/disable async log -# asyncLog 1 +# asyncLog 1 # time of keeping log files, days -# logKeepDays 0 - +# logKeepDays 0 # The following parameters are used for debug purpose only. # debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR @@ -226,87 +106,64 @@ keepColumnName 1 # 207: output trace, debug, warning and error to both screen and file # debug flag for all log type, take effect when non-zero value -# debugFlag 0 +# debugFlag 0 -# debug flag for meta management messages -# mDebugFlag 135 +# debug flag for timer +# tmrDebugFlag 131 -# debug flag for dnode messages -# dDebugFlag 135 +# debug flag for util +# uDebugFlag 131 -# debug flag for sync module -# sDebugFlag 135 +# debug flag for rpc +# rpcDebugFlag 131 -# debug flag for WAL -# wDebugFlag 135 +# debug flag for jni +# jniDebugFlag 131 -# debug flag for SDB -# sdbDebugFlag 135 - -# debug flag for RPC -# rpcDebugFlag 131 - -# debug flag for TAOS TIMER -# tmrDebugFlag 131 - -# debug flag for TDengine client -# cDebugFlag 131 - -# debug flag for JNI -# jniDebugFlag 131 - -# debug flag for storage -# uDebugFlag 131 - -# debug flag for http server -# httpDebugFlag 131 +# debug flag for query +# qDebugFlag 131 -# debug flag for monitor -# monDebugFlag 131 +# debug flag for taosc driver +# cDebugFlag 131 -# debug flag for query -# qDebugFlag 131 +# debug flag for dnode messages +# dDebugFlag 135 # debug flag for vnode -# vDebugFlag 131 - -# debug flag for TSDB -# tsdbDebugFlag 131 +# vDebugFlag 131 -# debug flag for continue query -# cqDebugFlag 131 +# debug flag for meta management messages +# mDebugFlag 135 -# enable/disable recording the SQL in taos client -# enableRecordSql 0 +# debug flag for wal +# wDebugFlag 135 -# generate core file when service crash -# enableCoreFile 1 +# debug flag for sync module +# sDebugFlag 135 -# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden -# maxBinaryDisplayWidth 30 +# debug flag for tsdb +# tsdbDebugFlag 131 -# enable/disable stream (continuous query) -# stream 1 +# debug flag for tq +# tqDebugFlag 131 -# in retrieve blocking model, only in 50% query threads will be used in query processing in dnode -# retrieveBlockingModel 0 +# debug flag for fs +# fsDebugFlag 131 -# the maximum allowed query buffer size in MB during query processing for each data node -# -1 no limit (default) -# 0 no query allowed, queries are disabled -# queryBufferSize -1 +# debug flag for udf +# udfDebugFlag 131 -# percent of redundant data in tsdb meta will compact meta data,0 means donot compact -# tsdbMetaCompactRatio 0 +# debug flag for sma +# smaDebugFlag 131 -# default string type used for storing JSON String, options can be binary/nchar, default is nchar -# defaultJSONStrType nchar +# debug flag for index +# idxDebugFlag 131 -# force TCP transmission -# rpcForceTcp 0 +# debug flag for tdb +# tdbDebugFlag 131 -# unit MB. Flush vnode wal file if walSize > walFlushSize and walSize > cache*0.5*blocks -# walFlushSize 1024 +# debug flag for meta +# metaDebugFlag 131 -# unit Hour. Latency of data migration -# keepTimeOffset 0 +# generate core file when service crash +# enableCoreFile 1 diff --git a/packaging/checkPackageRuning.py b/packaging/checkPackageRuning.py new file mode 100755 index 0000000000000000000000000000000000000000..c0d1e8b86c3df2150b7f434e899c545439ab0477 --- /dev/null +++ b/packaging/checkPackageRuning.py @@ -0,0 +1,103 @@ +#!/usr/bin/python +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### +# install pip +# pip install src/connector/python/ + +# -*- coding: utf-8 -*- +import sys , os +import getopt +import subprocess +# from this import d +import time + +# install taospy + +out = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ") +print("taospy version %s "%out) +if (out == "" ): + os.system("pip install git+https://github.com/taosdata/taos-connector-python.git") + print("install taos python connector") +else: + os.system("pip3 install --upgrade taospy ") + + + +# start taosd prepare +os.system("rm -rf /var/lib/taos/*") +os.system("systemctl restart taosd ") + +# wait a moment ,at least 5 seconds +time.sleep(5) + +# prepare data by taosBenchmark + +os.system("taosBenchmark -y -n 100 -t 100") + +import taos + +conn = taos.connect(host="localhost", + user="root", + password="taosdata", + database="test", + port=6030, + config="/etc/taos", # for windows the default value is C:\TDengine\cfg + timezone="Asia/Shanghai") # default your host's timezone + +server_version = conn.server_info +print("server_version", server_version) +client_version = conn.client_info +print("client_version", client_version) # 3.0.0.0 + +# Execute a sql and get its result set. It's useful for SELECT statement +result: taos.TaosResult = conn.query("SELECT count(*) from test.meters") + +data = result.fetch_all() + +if data[0][0] !=10000: + print(" taosBenchmark work not as expected ") + sys.exit(1) +else: + print(" taosBenchmark work as expected ") + +# test taosdump dump out data and dump in data + +# dump out datas +os.system("taosdump --version") +os.system("mkdir -p /tmp/dumpdata") +os.system("rm -rf /tmp/dumpdata/*") + + + +# dump data out +print("taosdump dump out data") + +os.system("taosdump -o /tmp/dumpdata -D test -y ") + +# drop database of test +print("drop database test") +os.system(" taos -s ' drop database test ;' ") + +# dump data in +print("taosdump dump data in") +os.system("taosdump -i /tmp/dumpdata -y ") + +result = conn.query("SELECT count(*) from test.meters") + +data = result.fetch_all() + +if data[0][0] !=10000: + print(" taosdump work not as expected ") + sys.exit(1) +else: + print(" taosdump work as expected ") + +conn.close() \ No newline at end of file diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm index 5676bf5c4324a340aab09c18c59636d4fc80d43c..65f261db2c6c1ac70b761312af68a5188acea541 100644 --- a/packaging/deb/DEBIAN/prerm +++ b/packaging/deb/DEBIAN/prerm @@ -1,6 +1,6 @@ #!/bin/bash -if [ $1 -eq "abort-upgrade" ]; then +if [ "$1"x = "abort-upgrade"x ]; then exit 0 fi @@ -29,6 +29,7 @@ else # Remove all links ${csudo}rm -f ${bin_link_dir}/taos || : ${csudo}rm -f ${bin_link_dir}/taosd || : + ${csudo}rm -f ${bin_link_dir}/udfd || : ${csudo}rm -f ${bin_link_dir}/taosadapter || : ${csudo}rm -f ${bin_link_dir}/taosdemo || : ${csudo}rm -f ${cfg_link_dir}/* || : diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 6de475a4c0ea48e8aa6befe1a735bf67e3f88b8f..3db9005f95a3027c42dd05b9f28d448ade5852cb 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -60,6 +60,7 @@ cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_pat cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin +cp ${compile_dir}/build/bin/udfd ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin if [ -f "${compile_dir}/build/bin/taosadapter" ]; then diff --git a/packaging/docker/README.md b/packaging/docker/README.md index e41182f471050af6b4d47b696eb237e319b2dd80..763ab73724587eb4dc231eb399a60937eaba6dca 100644 --- a/packaging/docker/README.md +++ b/packaging/docker/README.md @@ -47,7 +47,7 @@ taos> show databases; Query OK, 1 row(s) in set (0.002843s) ``` -Since TDengine use container hostname to establish connections, it's a bit more complex to use taos shell and native connectors(such as JDBC-JNI) with TDengine container instance. This is the recommended way to expose ports and use TDengine with docker in simple cases. If you want to use taos shell or taosc/connectors smoothly outside the `tdengine` container, see next use cases that match you need. +Since TDengine use container hostname to establish connections, it's a bit more complex to use TDengine CLI and native connectors(such as JDBC-JNI) with TDengine container instance. This is the recommended way to expose ports and use TDengine with docker in simple cases. If you want to use TDengine CLI or taosc/connectors smoothly outside the `tdengine` container, see next use cases that match you need. ### Start with host network @@ -87,7 +87,7 @@ docker run -d \ This command starts a docker container with TDengine server running and maps the container's TCP ports from 6030 to 6049 to the host's ports from 6030 to 6049 with TCP protocol and UDP ports range 6030-6039 to the host's UDP ports 6030-6039. If the host is already running TDengine server and occupying the same port(s), you need to map the container's port to a different unused port segment. (Please see TDengine 2.0 Port Description for details). In order to support TDengine clients accessing TDengine server services, both TCP and UDP ports need to be exposed by default(unless `rpcForceTcp` is set to `1`). -If you want to use taos shell or native connectors([JDBC-JNI](https://www.taosdata.com/cn/documentation/connector/java), or [driver-go](https://github.com/taosdata/driver-go)), you need to make sure the `TAOS_FQDN` is resolvable at `/etc/hosts` or with custom DNS service. +If you want to use TDengine CLI or native connectors([JDBC-JNI](https://www.taosdata.com/cn/documentation/connector/java), or [driver-go](https://github.com/taosdata/driver-go)), you need to make sure the `TAOS_FQDN` is resolvable at `/etc/hosts` or with custom DNS service. If you set the `TAOS_FQDN` to host's hostname, it will works as using `hosts` network like previous use case. Otherwise, like in `-e TAOS_FQDN=tdengine`, you can add the hostname record `tdengine` into `/etc/hosts` (use `127.0.0.1` here in host path, if use TDengine client/application in other hosts, you should set the right ip to the host eg. `192.168.10.1`(check the real ip in host with `hostname -i` or `ip route list default`) to make the TDengine endpoint resolvable): @@ -158,7 +158,7 @@ When you build your application with docker, you should add the TDengine client FROM ubuntu:20.04 RUN apt-get update && apt-get install -y wget ENV TDENGINE_VERSION=2.4.0.0 -RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ +RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && cd TDengine-client-${TDENGINE_VERSION} \ && ./install_client.sh \ @@ -265,7 +265,7 @@ Full version of dockerfile could be: ```dockerfile FROM golang:1.17.6-buster as builder ENV TDENGINE_VERSION=2.4.0.0 -RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ +RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && cd TDengine-client-${TDENGINE_VERSION} \ && ./install_client.sh \ @@ -279,7 +279,7 @@ RUN go env && go mod tidy && go build FROM ubuntu:20.04 RUN apt-get update && apt-get install -y wget ENV TDENGINE_VERSION=2.4.0.0 -RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ +RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && cd TDengine-client-${TDENGINE_VERSION} \ && ./install_client.sh \ @@ -391,7 +391,7 @@ test_td-1_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, test_td-2_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp ``` -Check dnodes with taos shell: +Check dnodes with TDengine CLI: ```bash $ docker-compose exec td-1 taos -s "show dnodes" diff --git a/packaging/release.bat b/packaging/release.bat index ffd3a680486985a9e302a51f6fd2d910ea2c381d..591227382f9cec4a2fa1308a9b827994430f7236 100644 --- a/packaging/release.bat +++ b/packaging/release.bat @@ -40,10 +40,12 @@ if not exist %work_dir%\debug\ver-%2-x86 ( ) cd %work_dir%\debug\ver-%2-x64 call vcvarsall.bat x64 -cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DVERNUMBER=%2 -DCPUTYPE=x64 +cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DBUILD_TEST=false -DVERNUMBER=%2 -DCPUTYPE=x64 cmake --build . rd /s /Q C:\TDengine cmake --install . +for /r c:\TDengine %%i in (*.dll) do signtool sign /f D:\\123.pfx /p taosdata %%i +for /r c:\TDengine %%i in (*.exe) do signtool sign /f D:\\123.pfx /p taosdata %%i if not %errorlevel% == 0 ( call :RUNFAILED build x64 failed & exit /b 1) cd %package_dir% iscc /DMyAppInstallName="%packagServerName_x64%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release @@ -51,19 +53,7 @@ if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x64% faile iscc /DMyAppInstallName="%packagClientName_x64%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x64% failed & exit /b 1) -cd %work_dir%\debug\ver-%2-x86 -call vcvarsall.bat x86 -cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DVERNUMBER=%2 -DCPUTYPE=x86 -cmake --build . -rd /s /Q C:\TDengine -cmake --install . -if not %errorlevel% == 0 ( call :RUNFAILED build x86 failed & exit /b 1) -cd %package_dir% -@REM iscc /DMyAppInstallName="%packagServerName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release -@REM if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x86% failed & exit /b 1) -iscc /DMyAppInstallName="%packagClientName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release -if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x86% failed & exit /b 1) - +for /r ..\release %%i in (*.exe) do signtool sign /f d:\\123.pfx /p taosdata %%i goto EXIT0 :USAGE diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index 7a34f7a222b13b5e91a88297428df3f425960309..637d2d425a457bd6fabf69867d556ee3e29fe0c5 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -69,6 +69,7 @@ cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin +cp %{_compiledir}/build/bin/udfd %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin if [ -f %{_compiledir}/build/bin/taosadapter ]; then @@ -204,6 +205,7 @@ if [ $1 -eq 0 ];then # Remove all links ${csudo}rm -f ${bin_link_dir}/taos || : ${csudo}rm -f ${bin_link_dir}/taosd || : + ${csudo}rm -f ${bin_link_dir}/udfd || : ${csudo}rm -f ${bin_link_dir}/taosadapter || : ${csudo}rm -f ${cfg_link_dir}/* || : ${csudo}rm -f ${inc_link_dir}/taos.h || : diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh new file mode 100755 index 0000000000000000000000000000000000000000..758c5541784917e7adad0fe655587cc7843cc0a1 --- /dev/null +++ b/packaging/testpackage.sh @@ -0,0 +1,169 @@ +#!/bin/sh + + +packgeName=$1 +version=$2 +originPackageName=$3 +originversion=$4 +testFile=$5 +subFile="taos.tar.gz" +password=$6 + +if [ ${testFile} = "server" ];then + tdPath="TDengine-server-${version}" + originTdpPath="TDengine-server-${originversion}" + installCmd="install.sh" +elif [ ${testFile} = "client" ];then + tdPath="TDengine-client-${version}" + originTdpPath="TDengine-client-${originversion}" + installCmd="install_client.sh" +elif [ ${testFile} = "tools" ];then + tdPath="taosTools-${version}" + originTdpPath="taosTools-${originversion}" + installCmd="install-taostools.sh" +fi + +function cmdInstall { +comd=$1 +if command -v ${comd} ;then + echo "${comd} is already installed" +else + if command -v apt ;then + apt-get install ${comd} -y + elif command -v yum ;then + yum -y install ${comd} + echo "you should install ${comd} manually" + fi +fi +} + + +echo "Uninstall all components of TDeingne" + +if command -v rmtaos ;then + echo "uninstall all components of TDeingne:rmtaos" + rmtaos +else + echo "os doesn't include TDengine " +fi + +if command -v rmtaostools ;then + echo "uninstall all components of TDeingne:rmtaostools" + rmtaostools +else + echo "os doesn't include rmtaostools " +fi + + +cmdInstall tree +cmdInstall wget +cmdInstall sshpass + +echo "new workroom path" +installPath="/usr/local/src/packageTest" +oriInstallPath="/usr/local/src/packageTest/3.1" + +if [ ! -d ${installPath} ] ;then + mkdir -p ${installPath} +else + echo "${installPath} already exists" +fi + + +if [ ! -d ${oriInstallPath} ] ;then + mkdir -p ${oriInstallPath} +else + echo "${oriInstallPath} already exists" +fi + + + + +echo "download installPackage" +# cd ${installPath} +# wget https://www.taosdata.com/assets-download/3.0/${packgeName} +# cd ${oriInstallPath} +# wget https://www.taosdata.com/assets-download/3.0/${originPackageName} + +cd ${installPath} +if [ ! -f {packgeName} ];then + sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} . +fi +if [ ! -f debAuto.sh ];then + echo '#!/usr/bin/expect ' > debAuto.sh + echo 'set timeout 3 ' >> debAuto.sh + echo 'pset packgeName [lindex $argv 0]' >> debAuto.sh + echo 'spawn dpkg -i ${packgeName}' >> debAuto.sh + echo 'expect "*one:"' >> debAuto.sh + echo 'send "\r"' >> debAuto.sh + echo 'expect "*skip:"' >> debAuto.sh + echo 'send "\r" ' >> debAuto.sh +fi + +if [[ ${packgeName} =~ "deb" ]];then + cd ${installPath} + dpkg -r taostools + dpkg -r tdengine + if [[ ${packgeName} =~ "TDengine" ]];then + echo "./debAuto.sh ${packgeName}" && chmod 755 debAuto.sh && ./debAuto.sh ${packgeName} + else + echo "dpkg -i ${packgeName}" && dpkg -i ${packgeName} + fi +elif [[ ${packgeName} =~ "rpm" ]];then + cd ${installPath} + echo "rpm ${packgeName}" && rpm -ivh ${packgeName} --quiet +elif [[ ${packgeName} =~ "tar" ]];then + cd ${oriInstallPath} + if [ ! -f {originPackageName} ];then + sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${originversion}/community${originPackageName} . + fi + echo "tar -xvf ${originPackageName}" && tar -xvf ${originPackageName} + + cd ${installPath} + echo "tar -xvf ${packgeName}" && tar -xvf ${packgeName} + + + if [ ${testFile} != "tools" ] ;then + cd ${installPath}/${tdPath} && tar vxf ${subFile} + cd ${oriInstallPath}/${originTdpPath} && tar vxf ${subFile} + fi + + echo "check installPackage File" + + cd ${installPath} + + tree ${oriInstallPath}/${originTdpPath} > ${oriInstallPath}/${originPackageName}_checkfile + tree ${installPath}/${tdPath} > ${installPath}/${packgeName}_checkfile + + diff ${installPath}/${packgeName}_checkfile ${oriInstallPath}/${originPackageName}_checkfile > ${installPath}/diffFile.log + diffNumbers=`cat ${installPath}/diffFile.log |wc -l ` + if [ ${diffNumbers} != 0 ];then + echo "The number and names of files have changed from the previous installation package" + echo `cat ${installPath}/diffFile.log` + exit -1 + fi + + cd ${installPath}/${tdPath} + if [ ${testFile} = "server" ];then + bash ${installCmd} -e no + else + bash ${installCmd} + fi + if [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]] ;then + cd ${installPath} + sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz . + # wget https://www.taosdata.com/assets-download/3.0/taosTools-2.1.2-Linux-x64.tar.gz + tar xvf taosTools-2.1.2-Linux-x64.tar.gz + cd taosTools-2.1.2 && bash install-taostools.sh + elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "deb" ]] ;then + cd ${installPath} + sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.deb . + dpkg -i taosTools-2.1.2-Linux-x64.deb + elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "rpm" ]] ;then + cd ${installPath} + sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.rpm . + rpm -ivh taosTools-2.1.2-Linux-x64.rpm --quiet + fi + +fi + diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index eda2b052d1c3d1a7d4abf0f5b168255915242171..39606ead300c8c603b9f25360d19e3af49b642ff 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -18,6 +18,7 @@ script_dir=$(dirname $(readlink -f "$0")) clientName="taos" serverName="taosd" +udfdName="udfd" configFile="taos.cfg" productName="TDengine" emailName="taosdata.com" @@ -192,6 +193,7 @@ function install_bin() { # Remove links ${csudo}rm -f ${bin_link_dir}/${clientName} || : ${csudo}rm -f ${bin_link_dir}/${serverName} || : + ${csudo}rm -f ${bin_link_dir}/${udfdName} || : ${csudo}rm -f ${bin_link_dir}/${adapterName} || : ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : ${csudo}rm -f ${bin_link_dir}/${demoName} || : @@ -205,6 +207,7 @@ function install_bin() { #Make link [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || : [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || : + [ -x ${install_main_dir}/bin/${udfdName} ] && ${csudo}ln -s ${install_main_dir}/bin/${udfdName} ${bin_link_dir}/${udfdName} || : [ -x ${install_main_dir}/bin/${adapterName} ] && ${csudo}ln -s ${install_main_dir}/bin/${adapterName} ${bin_link_dir}/${adapterName} || : [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || : [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || : @@ -742,7 +745,7 @@ function is_version_compatible() { fi exist_version=$(${installDir}/bin/${serverName} -V | head -1 | cut -d ' ' -f 3) - vercomp $exist_version "2.0.16.0" + vercomp $exist_version "3.0.0.0" case $? in 2) prompt_force=1 diff --git a/packaging/tools/make_install.bat b/packaging/tools/make_install.bat index 0f9e836ae2a6cfd80b598b363ae489cb08345d6c..d4dde391c8c2ca034c8e31ba2f5a413a3050fc1d 100644 --- a/packaging/tools/make_install.bat +++ b/packaging/tools/make_install.bat @@ -1,7 +1,57 @@ @echo off goto %1 :needAdmin + +if exist C:\\TDengine\\data\\dnode\\dnodeCfg.json ( + echo The default data directory C:/TDengine/data contains old data of tdengine 2.x, please clear it before installing! +) +set source_dir=%2 +set source_dir=%source_dir:/=\\% +set binary_dir=%3 +set binary_dir=%binary_dir:/=\\% +set osType=%4 +set verNumber=%5 +set tagert_dir=C:\\TDengine + +if not exist %tagert_dir% ( + mkdir %tagert_dir% +) +if not exist %tagert_dir%\\cfg ( + mkdir %tagert_dir%\\cfg +) +if not exist %tagert_dir%\\include ( + mkdir %tagert_dir%\\include +) +if not exist %tagert_dir%\\driver ( + mkdir %tagert_dir%\\driver +) +if not exist C:\\TDengine\\cfg\\taos.cfg ( + copy %source_dir%\\packaging\\cfg\\taos.cfg %tagert_dir%\\cfg\\taos.cfg > nul +) + +if exist %binary_dir%\\test\\cfg\\taosadapter.toml ( + if not exist %tagert_dir%\\cfg\\taosadapter.toml ( + copy %binary_dir%\\test\\cfg\\taosadapter.toml %tagert_dir%\\cfg\\taosadapter.toml > nul + ) +) + +copy %source_dir%\\include\\client\\taos.h %tagert_dir%\\include > nul +copy %source_dir%\\include\\util\\taoserror.h %tagert_dir%\\include > nul +copy %source_dir%\\include\\libs\\function\\taosudf.h %tagert_dir%\\include > nul +copy %binary_dir%\\build\\lib\\taos.lib %tagert_dir%\\driver > nul +copy %binary_dir%\\build\\lib\\taos_static.lib %tagert_dir%\\driver > nul +copy %binary_dir%\\build\\lib\\taos.dll %tagert_dir%\\driver > nul +copy %binary_dir%\\build\\bin\\taos.exe %tagert_dir% > nul +copy %binary_dir%\\build\\bin\\taosd.exe %tagert_dir% > nul +copy %binary_dir%\\build\\bin\\udfd.exe %tagert_dir% > nul +if exist %binary_dir%\\build\\bin\\taosBenchmark.exe ( + copy %binary_dir%\\build\\bin\\taosBenchmark.exe %tagert_dir% > nul +) +if exist %binary_dir%\\build\\bin\\taosadapter.exe ( + copy %binary_dir%\\build\\bin\\taosadapter.exe %tagert_dir% > nul +) + mshta vbscript:createobject("shell.application").shellexecute("%~s0",":hasAdmin","","runas",1)(window.close)&& echo To start/stop TDengine with administrator privileges: sc start/stop taosd &goto :eof :hasAdmin -cp -f C:\\TDengine\\driver\\taos.dll C:\\Windows\\System32 +copy /y C:\\TDengine\\driver\\taos.dll C:\\Windows\\System32 > nul sc query "taosd" >nul || sc create "taosd" binPath= "C:\\TDengine\\taosd.exe --win_service" start= DEMAND diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index d8d4c5bf2af4b601f451846bc4b2063d62237e29..f554942ce330767b45ef5ec2ee0df422c273c5ed 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -381,8 +381,7 @@ function install_header() { ${install_main_dir}/include || ${csudo}cp -f ${source_dir}/include/client/taos.h ${source_dir}/include/common/taosdef.h ${source_dir}/include/util/taoserror.h ${source_dir}/include/libs/function/taosudf.h \ ${install_main_2_dir}/include && - ${csudo}chmod 644 ${install_main_dir}/include/* ||: - ${csudo}chmod 644 ${install_main_2_dir}/include/* + ${csudo}chmod 644 ${install_main_dir}/include/* || ${csudo}chmod 644 ${install_main_2_dir}/include/* fi } @@ -664,7 +663,9 @@ function install_TDengine() { ## ==============================Main program starts from here============================ echo source directory: $1 echo binary directory: $2 -if [ "$osType" != "Darwin" ]; then +if [ -x ${data_dir}/dnode/dnodeCfg.json ]; then + echo -e "\033[44;31;5mThe default data directory ${data_dir} contains old data of tdengine 2.x, please clear it before installing!\033[0m" +elif [ "$osType" != "Darwin" ]; then if [ -x ${bin_dir}/${clientName} ]; then update_TDengine else diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 6103ce170ce646665ed0d97ef85dad46b2473ed6..f5e3bf18822676f54ee2f20412b5ebb4ce57fd3a 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -85,6 +85,7 @@ else ${build_dir}/bin/${clientName} \ ${taostools_bin_files} \ ${build_dir}/bin/taosadapter \ + ${build_dir}/bin/udfd \ ${script_dir}/remove.sh \ ${script_dir}/set_core.sh \ ${script_dir}/startPre.sh \ @@ -318,7 +319,7 @@ if [ "$verMode" == "cluster" ]; then fi # Copy release note -cp ${script_dir}/release_note ${install_dir} +# cp ${script_dir}/release_note ${install_dir} # exit 1 diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index aa80cfb86c839858e1702ea6c2b7f8fcd9cdfc99..fcc8a2a942931539b1c2f068f14f074479cbfe08 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -118,6 +118,7 @@ function install_bin() { # Remove links ${csudo}rm -f ${bin_link_dir}/taos || : ${csudo}rm -f ${bin_link_dir}/taosd || : + ${csudo}rm -f ${bin_link_dir}/udfd || : ${csudo}rm -f ${bin_link_dir}/taosadapter || : ${csudo}rm -f ${bin_link_dir}/taosBenchmark || : ${csudo}rm -f ${bin_link_dir}/taosdemo || : @@ -130,6 +131,7 @@ function install_bin() { #Make link [ -x ${bin_dir}/taos ] && ${csudo}ln -s ${bin_dir}/taos ${bin_link_dir}/taos || : [ -x ${bin_dir}/taosd ] && ${csudo}ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || : + [ -x ${bin_dir}/udfd ] && ${csudo}ln -s ${bin_dir}/udfd ${bin_link_dir}/udfd || : [ -x ${bin_dir}/taosadapter ] && ${csudo}ln -s ${bin_dir}/taosadapter ${bin_link_dir}/taosadapter || : [ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosdemo || : [ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosBenchmark || : diff --git a/packaging/tools/tdengine.iss b/packaging/tools/tdengine.iss index 73102018159b3d37dca8531cadc2a388a2843651..272a0dfb5c556bf9ed1f5a567579e1a93b10be80 100644 --- a/packaging/tools/tdengine.iss +++ b/packaging/tools/tdengine.iss @@ -51,7 +51,7 @@ Source: taos.bat; DestDir: "{app}\include"; Flags: igNoreversion; ;Source: taosdemo.png; DestDir: "{app}\include"; Flags: igNoreversion; ;Source: taosShell.png; DestDir: "{app}\include"; Flags: igNoreversion; Source: favicon.ico; DestDir: "{app}\include"; Flags: igNoreversion; -Source: {#MyAppSourceDir}{#MyAppDLLName}; DestDir: "{win}\System32"; Flags: igNoreversion; +Source: {#MyAppSourceDir}{#MyAppDLLName}; DestDir: "{win}\System32"; Flags: 64bit;Check:IsWin64; Source: {#MyAppSourceDir}{#MyAppCfgName}; DestDir: "{app}\cfg"; Flags: igNoreversion recursesubdirs createallsubdirs onlyifdoesntexist uninsneveruninstall Source: {#MyAppSourceDir}{#MyAppDriverName}; DestDir: "{app}\driver"; Flags: igNoreversion recursesubdirs createallsubdirs ;Source: {#MyAppSourceDir}{#MyAppConnectorName}; DestDir: "{app}\connector"; Flags: igNoreversion recursesubdirs createallsubdirs diff --git a/source/client/CMakeLists.txt b/source/client/CMakeLists.txt index f52edbe71f151398c5ebdcd705eab376f2318aae..e8e3c878496c58631131922cc6de47491d548f06 100644 --- a/source/client/CMakeLists.txt +++ b/source/client/CMakeLists.txt @@ -27,11 +27,18 @@ else() INCLUDE_DIRECTORIES(jni/linux) endif() +set_target_properties( + taos + PROPERTIES + CLEAN_DIRECT_OUTPUT + 1 +) + set_target_properties( taos PROPERTIES VERSION ${TD_VER_NUMBER} - SOVERSION ${TD_VER_NUMBER} + SOVERSION 1 ) add_library(taos_static STATIC ${CLIENT_SRC}) diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index f275ae0885f10663b3c0ae853ecf1298fac25777..b8fa9580e70c1c7aa17a1402ce6c8113a7f8e094 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -52,15 +52,17 @@ enum { RES_TYPE__QUERY = 1, RES_TYPE__TMQ, RES_TYPE__TMQ_META, + RES_TYPE__TAOSX, }; #define SHOW_VARIABLES_RESULT_COLS 2 #define SHOW_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE) #define SHOW_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE) -#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY) -#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ) -#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META) +#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY) +#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ || *(int8_t*)res == RES_TYPE__TAOSX) +#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META) +#define TD_RES_TMQ_TAOSX(res) (*(int8_t*)res == RES_TYPE__TAOSX) typedef struct SAppInstInfo SAppInstInfo; @@ -95,10 +97,17 @@ typedef struct { } SClientHbMgr; typedef struct SQueryExecMetric { - int64_t start; // start timestamp, us - int64_t parsed; // start to parse, us - int64_t send; // start to send to server, us - int64_t rsp; // receive response from server, us + int64_t start; // start timestamp, us + int64_t syntaxStart; // start to parse, us + int64_t syntaxEnd; // end to parse, us + int64_t ctgStart; // start to parse, us + int64_t ctgEnd; // end to parse, us + int64_t semanticEnd; + int64_t planEnd; + int64_t resultReady; + int64_t execEnd; + int64_t send; // start to send to server, us + int64_t rsp; // receive response from server, us } SQueryExecMetric; struct SAppInstInfo { @@ -132,6 +141,7 @@ typedef struct STscObj { char db[TSDB_DB_FNAME_LEN]; char sVer[TSDB_VERSION_LEN]; char sDetailVer[128]; + int8_t sysInfo; int8_t connType; int32_t acctId; uint32_t connId; @@ -192,8 +202,8 @@ typedef struct { int32_t vgId; SSchemaWrapper schema; int32_t resIter; - SMqDataRsp rsp; SReqResultInfo resInfo; + SMqDataRsp rsp; } SMqRspObj; typedef struct { @@ -204,6 +214,17 @@ typedef struct { SMqMetaRsp metaRsp; } SMqMetaRspObj; +typedef struct { + int8_t resType; + char topic[TSDB_TOPIC_FNAME_LEN]; + char db[TSDB_DB_FNAME_LEN]; + int32_t vgId; + SSchemaWrapper schema; + int32_t resIter; + SReqResultInfo resInfo; + STaosxRsp rsp; +} SMqTaosxRspObj; + typedef struct SRequestObj { int8_t resType; // query or tmq uint64_t requestId; @@ -252,7 +273,7 @@ SRequestObj* execQuery(uint64_t connId, const char* sql, int sqlLen, bool valida TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly); void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly); -int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols); +int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols); static FORCE_INLINE SReqResultInfo* tmqGetCurResInfo(TAOS_RES* res) { SMqRspObj* msg = (SMqRspObj*)res; @@ -363,8 +384,9 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest); int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList); void doAsyncQuery(SRequestObj* pRequest, bool forceUpdateMeta); -int32_t removeMeta(STscObj* pTscObj, SArray* tbList); // todo move to clientImpl.c and become a static function -int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog); // todo move to xxx +int32_t removeMeta(STscObj* pTscObj, SArray* tbList); +int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog); +int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog); bool qnodeRequired(SRequestObj* pRequest); #ifdef __cplusplus diff --git a/source/client/inc/clientLog.h b/source/client/inc/clientLog.h index d47edcd79535a3c8fc5d94aabd3bd8b08d0448f7..ec0a41a68f9515bc7ea2c54e96b0235c0a9683eb 100644 --- a/source/client/inc/clientLog.h +++ b/source/client/inc/clientLog.h @@ -29,6 +29,7 @@ extern "C" { #define tscDebug(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0) #define tscTrace(...) do { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC ", DEBUG_TRACE, cDebugFlag, __VA_ARGS__); }} while(0) #define tscDebugL(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0) +#define tscPerf(...) do { taosPrintLog("TSC ", 0, cDebugFlag, __VA_ARGS__); } while(0) #ifdef __cplusplus } diff --git a/source/client/src/TMQConnector.c b/source/client/src/TMQConnector.c index 17d3a212c482c3462e542721d7d57f516250ff13..fcf6957df92e92b990c60cd3b41342dbbf90ae9e 100644 --- a/source/client/src/TMQConnector.c +++ b/source/client/src/TMQConnector.c @@ -42,6 +42,7 @@ void commit_cb(tmq_t *tmq, int32_t code, void *param) { JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqConfNewImp(JNIEnv *env, jobject jobj) { tmq_conf_t *conf = tmq_conf_new(); + jniGetGlobalMethod(env); return (jlong)conf; } diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index ff1b9322c92a8791ed79a3025f0af362fb441adc..bf92a9ba6af41d8c3048684ed36eec84cc0a6235 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -69,14 +69,26 @@ static void deregisterRequest(SRequestObj *pRequest) { int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1); int32_t num = atomic_sub_fetch_32(&pTscObj->numOfReqs, 1); - int64_t duration = taosGetTimestampUs() - pRequest->metric.start; + int64_t nowUs = taosGetTimestampUs(); + int64_t duration = nowUs - pRequest->metric.start; tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64 " elapsed:%" PRIu64 " ms, current:%d, app current:%d", pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst); if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) { + tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 + "us, exec:%" PRId64 "us", + duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart, + pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd, + pRequest->metric.execEnd - pRequest->metric.semanticEnd); atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration); } else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) { + tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 + "us, planner:%" PRId64 "us, exec:%" PRId64 "us", + duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart, + pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd, + pRequest->metric.planEnd - pRequest->metric.semanticEnd, + pRequest->metric.resultReady - pRequest->metric.planEnd); atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration); } @@ -330,7 +342,6 @@ void doDestroyRequest(void *p) { schedulerFreeJob(&pRequest->body.queryJob, 0); taosMemoryFreeClear(pRequest->msgBuf); - taosMemoryFreeClear(pRequest->sqlstr); taosMemoryFreeClear(pRequest->pDb); doFreeReqResultInfo(&pRequest->body.resInfo); @@ -349,6 +360,7 @@ void doDestroyRequest(void *p) { taosMemoryFree(pRequest->body.param); } + taosMemoryFreeClear(pRequest->sqlstr); taosMemoryFree(pRequest); tscTrace("end to destroy request %" PRIx64 " p:%p", reqId, pRequest); } @@ -393,7 +405,9 @@ void taos_init_imp(void) { schedulerInit(); tscDebug("starting to initialize TAOS driver"); +#ifndef WINDOWS taosSetCoreDump(true); +#endif initTaskQueue(); fmFuncMgtInit(); diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c index 9475d1b51e51d093bcf7335d1668908e0c039a80..56e3527f9684e17f58c7b0fca5bb7a3fa6210d1c 100644 --- a/source/client/src/clientHb.c +++ b/source/client/src/clientHb.c @@ -145,7 +145,7 @@ static int32_t hbProcessStbInfoRsp(void *value, int32_t valueLen, struct SCatalo } static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) { - SClientHbReq *pReq = taosHashGet(pAppHbMgr->activeInfo, &pRsp->connKey, sizeof(SClientHbKey)); + SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &pRsp->connKey, sizeof(SClientHbKey)); if (NULL == pReq) { tscWarn("pReq to get activeInfo, may be dropped, refId:%" PRIx64 ", type:%d", pRsp->connKey.tscRid, pRsp->connKey.connType); @@ -260,6 +260,8 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) { } } + taosHashRelease(pAppHbMgr->activeInfo, pReq); + return TSDB_CODE_SUCCESS; } @@ -914,10 +916,11 @@ int hbRegisterConn(SAppHbMgr *pAppHbMgr, int64_t tscRefId, int64_t clusterId, in } void hbDeregisterConn(SAppHbMgr *pAppHbMgr, SClientHbKey connKey) { - SClientHbReq *pReq = taosHashGet(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey)); + SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey)); if (pReq) { tFreeClientHbReq(pReq); taosHashRemove(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey)); + taosHashRelease(pAppHbMgr->activeInfo, pReq); } if (NULL == pReq) { diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index acdb3b68b0a88477d1ff5f7856e533e33b49e5a7..f91ceb31840bbf8dccd9144d5a12a41e7f2f358a 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -215,6 +215,7 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC .pUser = pTscObj->user, .schemalessType = pTscObj->schemalessType, .isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)), + .enableSysInfo = pTscObj->sysInfo, .svrVer = pTscObj->sVer, .nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes)}; @@ -238,12 +239,15 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC TSWAP(pRequest->targetTableList, (*pQuery)->pTargetTableList); } + taosArrayDestroy(cxt.pTableMetaPos); + taosArrayDestroy(cxt.pTableVgroupPos); + return code; } int32_t execLocalCmd(SRequestObj* pRequest, SQuery* pQuery) { SRetrieveTableRsp* pRsp = NULL; - int32_t code = qExecCommand(pQuery->pRoot, &pRsp); + int32_t code = qExecCommand(pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp); if (TSDB_CODE_SUCCESS == code && NULL != pRsp) { code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true); } @@ -281,7 +285,7 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) { return; } - int32_t code = qExecCommand(pQuery->pRoot, &pRsp); + int32_t code = qExecCommand(pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp); if (TSDB_CODE_SUCCESS == code && NULL != pRsp) { code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true); } @@ -416,7 +420,8 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra .showRewrite = pQuery->showRewrite, .pMsg = pRequest->msgBuf, .msgLen = ERROR_MSG_BUF_DEFAULT_SIZE, - .pUser = pRequest->pTscObj->user}; + .pUser = pRequest->pTscObj->user, + .sysInfo = pRequest->pTscObj->sysInfo}; return qCreateQueryPlan(&cxt, pPlan, pNodeList); } @@ -689,11 +694,11 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList TDMT_VND_CREATE_TABLE == pRequest->type) { pRequest->body.resInfo.numOfRows = res.numOfRows; if (TDMT_VND_SUBMIT == pRequest->type) { - STscObj *pTscObj = pRequest->pTscObj; - SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary; - atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertRows, res.numOfRows); + STscObj* pTscObj = pRequest->pTscObj; + SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary; + atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertRows, res.numOfRows); } - + schedulerFreeJob(&pRequest->body.queryJob, 0); } @@ -718,6 +723,12 @@ int32_t handleSubmitExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog for (int32_t i = 0; i < pRsp->nBlocks; ++i) { SSubmitBlkRsp* blk = pRsp->pBlocks + i; + if (blk->pMeta) { + handleCreateTbExecRes(blk->pMeta, pCatalog); + tFreeSTableMetaRsp(blk->pMeta); + taosMemoryFreeClear(blk->pMeta); + } + if (NULL == blk->tblFName || 0 == blk->tblFName[0]) { continue; } @@ -777,6 +788,10 @@ int32_t handleAlterTbExecRes(void* res, SCatalog* pCatalog) { return catalogUpdateTableMeta(pCatalog, (STableMetaRsp*)res); } +int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog) { + return catalogUpdateTableMeta(pCatalog, (STableMetaRsp*)res); +} + int32_t handleQueryExecRsp(SRequestObj* pRequest) { if (NULL == pRequest->body.resInfo.execRes.res) { return TSDB_CODE_SUCCESS; @@ -799,9 +814,22 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) { code = handleAlterTbExecRes(pRes->res, pCatalog); break; } + case TDMT_VND_CREATE_TABLE: { + SArray* pList = (SArray*)pRes->res; + int32_t num = taosArrayGetSize(pList); + for (int32_t i = 0; i < num; ++i) { + void* res = taosArrayGetP(pList, i); + code = handleCreateTbExecRes(res, pCatalog); + } + break; + } + case TDMT_MND_CREATE_STB: { + code = handleCreateTbExecRes(pRes->res, pCatalog); + break; + } case TDMT_VND_SUBMIT: { - atomic_add_fetch_64((int64_t *)&pAppInfo->summary.insertBytes, pRes->numOfBytes); - + atomic_add_fetch_64((int64_t*)&pAppInfo->summary.insertBytes, pRes->numOfBytes); + code = handleSubmitExecRes(pRequest, pRes->res, pCatalog, &epset); break; } @@ -823,6 +851,8 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) { SRequestObj* pRequest = (SRequestObj*)param; pRequest->code = code; + pRequest->metric.resultReady = taosGetTimestampUs(); + if (pResult) { memcpy(&pRequest->body.resInfo.execRes, pResult, sizeof(*pResult)); } @@ -832,13 +862,15 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) { if (pResult) { pRequest->body.resInfo.numOfRows = pResult->numOfRows; if (TDMT_VND_SUBMIT == pRequest->type) { - STscObj *pTscObj = pRequest->pTscObj; - SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary; - atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertRows, pResult->numOfRows); + STscObj* pTscObj = pRequest->pTscObj; + SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary; + atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertRows, pResult->numOfRows); } } schedulerFreeJob(&pRequest->body.queryJob, 0); + + pRequest->metric.execEnd = taosGetTimestampUs(); } taosMemoryFree(pResult); @@ -856,17 +888,13 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) { return; } - if (code == TSDB_CODE_SUCCESS) { - code = handleQueryExecRsp(pRequest); - ASSERT(pRequest->code == TSDB_CODE_SUCCESS); - pRequest->code = code; - } - tscDebug("schedulerExecCb request type %s", TMSG_INFO(pRequest->type)); - if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) { + if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type) && NULL == pRequest->body.resInfo.execRes.res) { removeMeta(pTscObj, pRequest->targetTableList); } + handleQueryExecRsp(pRequest); + // return to client pRequest->body.queryFp(pRequest->body.param, pRequest, code); } @@ -877,14 +905,14 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue if (pQuery->pRoot) { pRequest->stmtType = pQuery->pRoot->type; } - + if (pQuery->pRoot && !pRequest->inRetry) { - STscObj *pTscObj = pRequest->pTscObj; - SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary; + STscObj* pTscObj = pRequest->pTscObj; + SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary; if (QUERY_NODE_VNODE_MODIF_STMT == pQuery->pRoot->type) { - atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertsReq, 1); + atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertsReq, 1); } else if (QUERY_NODE_SELECT_STMT == pQuery->pRoot->type) { - atomic_add_fetch_64((int64_t *)&pActivity->numOfQueryReq, 1); + atomic_add_fetch_64((int64_t*)&pActivity->numOfQueryReq, 1); } } @@ -927,6 +955,10 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue qDestroyQuery(pQuery); } + if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type) && NULL == pRequest->body.resInfo.execRes.res) { + removeMeta(pRequest->pTscObj, pRequest->targetTableList); + } + handleQueryExecRsp(pRequest); if (NULL != pRequest && TSDB_CODE_SUCCESS != code) { @@ -987,7 +1019,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM .showRewrite = pQuery->showRewrite, .pMsg = pRequest->msgBuf, .msgLen = ERROR_MSG_BUF_DEFAULT_SIZE, - .pUser = pRequest->pTscObj->user}; + .pUser = pRequest->pTscObj->user, + .sysInfo = pRequest->pTscObj->sysInfo}; SAppInstInfo* pAppInfo = getAppInfo(pRequest); SQueryPlan* pDag = NULL; @@ -999,6 +1032,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM pRequest->body.subplanNum = pDag->numOfSubplans; } + pRequest->metric.planEnd = taosGetTimestampUs(); + if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) { SArray* pNodeList = NULL; buildAsyncExecNodeList(pRequest, &pNodeList, pMnodeList, pResultMeta); @@ -1124,10 +1159,6 @@ SRequestObj* execQuery(uint64_t connId, const char* sql, int sqlLen, bool valida inRetry = true; } while (retryNum++ < REQUEST_TOTAL_EXEC_TIMES); - if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) { - removeMeta(pRequest->pTscObj, pRequest->targetTableList); - } - return pRequest; } @@ -1467,9 +1498,9 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) tscDebug("0x%" PRIx64 " fetch results, numOfRows:%d total Rows:%" PRId64 ", complete:%d, reqId:0x%" PRIx64, pRequest->self, pResInfo->numOfRows, pResInfo->totalRows, pResInfo->completed, pRequest->requestId); - STscObj *pTscObj = pRequest->pTscObj; - SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary; - atomic_add_fetch_64((int64_t *)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen); + STscObj* pTscObj = pRequest->pTscObj; + SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary; + atomic_add_fetch_64((int64_t*)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen); if (pResultInfo->numOfRows == 0) { return NULL; @@ -1572,10 +1603,11 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int } int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) { - int32_t cols = *(int32_t*) (p + sizeof(int32_t) * 3); + int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3); ASSERT(numOfCols == cols); - return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t)*3 + sizeof(uint64_t) + numOfCols * (sizeof(int8_t) + sizeof(int32_t)); + return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) * 3 + sizeof(uint64_t) + + numOfCols * (sizeof(int8_t) + sizeof(int32_t)); } static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) { @@ -1947,7 +1979,7 @@ _OVER: int32_t appendTbToReq(SHashObj* pHash, int32_t pos1, int32_t len1, int32_t pos2, int32_t len2, const char* str, int32_t acctId, char* db) { - SName name; + SName name = {0}; if (len1 <= 0) { return -1; @@ -2006,7 +2038,7 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, bool inEscape = false; int32_t code = 0; - void *pIter = NULL; + void* pIter = NULL; int32_t vIdx = 0; int32_t vPos[2]; diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 0ec724c6d0a388724dbabfd6e4e3e55559c32f14..30860780807a820b041e27729f8e351fb46c99b3 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -184,6 +184,19 @@ void taos_free_result(TAOS_RES *res) { SRequestObj *pRequest = (SRequestObj *)res; tscDebug("0x%" PRIx64 " taos_free_result start to free query", pRequest->requestId); destroyRequest(pRequest); + } else if (TD_RES_TMQ_TAOSX(res)) { + SMqTaosxRspObj *pRsp = (SMqTaosxRspObj *)res; + if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree); + if (pRsp->rsp.blockDataLen) taosArrayDestroy(pRsp->rsp.blockDataLen); + if (pRsp->rsp.withTbName) taosArrayDestroyP(pRsp->rsp.blockTbName, taosMemoryFree); + if (pRsp->rsp.withSchema) taosArrayDestroyP(pRsp->rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper); + // taosx + taosArrayDestroy(pRsp->rsp.createTableLen); + taosArrayDestroyP(pRsp->rsp.createTableReq, taosMemoryFree); + + pRsp->resInfo.pRspMsg = NULL; + doFreeReqResultInfo(&pRsp->resInfo); + taosMemoryFree(pRsp); } else if (TD_RES_TMQ(res)) { SMqRspObj *pRsp = (SMqRspObj *)res; if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree); @@ -192,6 +205,7 @@ void taos_free_result(TAOS_RES *res) { if (pRsp->rsp.withSchema) taosArrayDestroyP(pRsp->rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper); pRsp->resInfo.pRspMsg = NULL; doFreeReqResultInfo(&pRsp->resInfo); + taosMemoryFree(pRsp); } else if (TD_RES_TMQ_META(res)) { SMqMetaRspObj *pRspObj = (SMqMetaRspObj *)res; taosMemoryFree(pRspObj->metaRsp.metaRsp); @@ -673,6 +687,8 @@ static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) { taosArrayDestroy(pWrapper->catalogReq.pIndex); taosArrayDestroy(pWrapper->catalogReq.pUser); taosArrayDestroy(pWrapper->catalogReq.pTableIndex); + taosArrayDestroy(pWrapper->pCtx->pTableMetaPos); + taosArrayDestroy(pWrapper->pCtx->pTableVgroupPos); taosMemoryFree(pWrapper->pCtx); taosMemoryFree(pWrapper); } @@ -682,6 +698,8 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) { SQuery *pQuery = pWrapper->pQuery; SRequestObj *pRequest = pWrapper->pRequest; + pRequest->metric.ctgEnd = taosGetTimestampUs(); + if (code == TSDB_CODE_SUCCESS) { code = qAnalyseSqlSemantic(pWrapper->pCtx, &pWrapper->catalogReq, pResultMeta, pQuery); pRequest->stableQuery = pQuery->stableQuery; @@ -690,6 +708,8 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) { } } + pRequest->metric.semanticEnd = taosGetTimestampUs(); + if (code == TSDB_CODE_SUCCESS) { if (pQuery->haveResultSet) { setResSchemaInfo(&pRequest->body.resInfo, pQuery->pResSchema, pQuery->numOfResCols); @@ -752,6 +772,7 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) { .pUser = pTscObj->user, .schemalessType = pTscObj->schemalessType, .isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)), + .enableSysInfo = pTscObj->sysInfo, .async = true, .svrVer = pTscObj->sVer, .nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes)}; @@ -781,12 +802,16 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { SQuery *pQuery = NULL; + pRequest->metric.syntaxStart = taosGetTimestampUs(); + SCatalogReq catalogReq = {.forceUpdate = updateMetaForce, .qNodeRequired = qnodeRequired(pRequest)}; code = qParseSqlSyntax(pCxt, &pQuery, &catalogReq); if (code != TSDB_CODE_SUCCESS) { goto _error; } + pRequest->metric.syntaxEnd = taosGetTimestampUs(); + if (!updateMetaForce) { STscObj *pTscObj = pRequest->pTscObj; SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary; @@ -813,6 +838,8 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { .requestObjRefId = pCxt->requestRid, .mgmtEps = pCxt->mgmtEpSet}; + pRequest->metric.ctgStart = taosGetTimestampUs(); + code = catalogAsyncGetAllMeta(pCxt->pCatalog, &conn, &catalogReq, retrieveMetaCallback, pWrapper, &pRequest->body.queryJob); pCxt = NULL; diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index 0c4cf23c4e1708f4479a1b744dea37752513670d..a7a16d484ca10a8baa65419105f42e46dc3814f3 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -96,6 +96,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) { connectRsp.epSet.eps[i].fqdn, connectRsp.epSet.eps[i].port, pTscObj->id); } + pTscObj->sysInfo = connectRsp.sysInfo; pTscObj->connId = connectRsp.connId; pTscObj->acctId = connectRsp.acctId; tstrncpy(pTscObj->sVer, connectRsp.sVer, tListLen(pTscObj->sVer)); @@ -232,13 +233,36 @@ int32_t processCreateSTableRsp(void* param, SDataBuf* pMsg, int32_t code) { assert(pMsg != NULL && param != NULL); SRequestObj* pRequest = param; - taosMemoryFree(pMsg->pData); if (code != TSDB_CODE_SUCCESS) { setErrno(pRequest, code); + } else { + SMCreateStbRsp createRsp = {0}; + SDecoder coder = {0}; + tDecoderInit(&coder, pMsg->pData, pMsg->len); + tDecodeSMCreateStbRsp(&coder, &createRsp); + tDecoderClear(&coder); + + pRequest->body.resInfo.execRes.msgType = TDMT_MND_CREATE_STB; + pRequest->body.resInfo.execRes.res = createRsp.pMeta; } + taosMemoryFree(pMsg->pData); + if (pRequest->body.queryFp != NULL) { - removeMeta(pRequest->pTscObj, pRequest->tableList); + SExecResult* pRes = &pRequest->body.resInfo.execRes; + + if (code == TSDB_CODE_SUCCESS) { + SCatalog* pCatalog = NULL; + int32_t ret = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog); + if (pRes->res != NULL) { + ret = handleCreateTbExecRes(pRes->res, pCatalog); + } + + if (ret != TSDB_CODE_SUCCESS) { + code = ret; + } + } + pRequest->body.queryFp(pRequest->body.param, pRequest, code); } else { tsem_post(&pRequest->body.rspSem); diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 9f905a835241d54722cf3e15056d1d1019123dcf..c5ce2f632c0a36ccfda0a85cdac216f82edbc8d6 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -85,8 +85,11 @@ typedef TSDB_SML_PROTOCOL_TYPE SMLProtocolType; typedef enum { SCHEMA_ACTION_NULL, - SCHEMA_ACTION_COLUMN, - SCHEMA_ACTION_TAG + SCHEMA_ACTION_CREATE_STABLE, + SCHEMA_ACTION_ADD_COLUMN, + SCHEMA_ACTION_ADD_TAG, + SCHEMA_ACTION_CHANGE_COLUMN_SIZE, + SCHEMA_ACTION_CHANGE_TAG_SIZE, } ESchemaAction; typedef struct { @@ -219,7 +222,7 @@ static int32_t smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSmlKv *kv, bool isTag, ESchemaAction *action, SSmlHandle *info) { - uint16_t *index = (uint16_t *)taosHashGet(colHash, kv->key, kv->keyLen); + uint16_t *index = colHash ? (uint16_t *)taosHashGet(colHash, kv->key, kv->keyLen) : NULL; if (index) { if (colField[*index].type != kv->type) { uError("SML:0x%" PRIx64 " point type and db type mismatch. key: %s. point type: %d, db type: %d", info->id, @@ -232,16 +235,16 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm (colField[*index].type == TSDB_DATA_TYPE_NCHAR && ((colField[*index].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE < kv->length))) { if (isTag) { - *action = SCHEMA_ACTION_TAG; + *action = SCHEMA_ACTION_CHANGE_TAG_SIZE; } else { - *action = SCHEMA_ACTION_COLUMN; + *action = SCHEMA_ACTION_CHANGE_COLUMN_SIZE; } } } else { if (isTag) { - *action = SCHEMA_ACTION_TAG; + *action = SCHEMA_ACTION_ADD_TAG; } else { - *action = SCHEMA_ACTION_COLUMN; + *action = SCHEMA_ACTION_ADD_COLUMN; } } return 0; @@ -310,9 +313,31 @@ static int32_t getBytes(uint8_t type, int32_t length){ } } +static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashObj *schemaHash, SArray *cols, SArray* results, int32_t numOfCols, bool isTag) { + for (int j = 0; j < taosArrayGetSize(cols); ++j) { + SSmlKv *kv = (SSmlKv *)taosArrayGetP(cols, j); + ESchemaAction action = SCHEMA_ACTION_NULL; + smlGenerateSchemaAction(schemaField, schemaHash, kv, isTag, &action, info); + if(action == SCHEMA_ACTION_ADD_COLUMN || action == SCHEMA_ACTION_ADD_TAG){ + SField field = {0}; + field.type = kv->type; + field.bytes = getBytes(kv->type, kv->length); + memcpy(field.name, kv->key, kv->keyLen); + taosArrayPush(results, &field); + }else if(action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE || action == SCHEMA_ACTION_CHANGE_TAG_SIZE){ + uint16_t *index = (uint16_t *)taosHashGet(schemaHash, kv->key, kv->keyLen); + uint16_t newIndex = *index; + if(isTag) newIndex -= numOfCols; + SField *field = (SField *)taosArrayGet(results, newIndex); + field->bytes = getBytes(kv->type, kv->length); + } + } + return TSDB_CODE_SUCCESS; +} + //static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SSmlSTableMeta *sTableData, // int32_t colVer, int32_t tagVer, int8_t source, uint64_t suid){ -static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SSmlSTableMeta *sTableData, +static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray* pColumns, SArray* pTags, STableMeta *pTableMeta, ESchemaAction action){ SRequestObj* pRequest = NULL; @@ -320,6 +345,12 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SSmlSTableMeta *s int32_t code = TSDB_CODE_SUCCESS; SCmdMsgInfo pCmdMsg = {0}; + // put front for free + pReq.numOfColumns = taosArrayGetSize(pColumns); + pReq.pColumns = pColumns; + pReq.numOfTags = taosArrayGetSize(pTags); + pReq.pTags = pTags; + code = buildRequest(info->taos->id, "", 0, NULL, false, &pRequest); if (code != TSDB_CODE_SUCCESS) { goto end; @@ -330,91 +361,41 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SSmlSTableMeta *s goto end; } - if (action == SCHEMA_ACTION_NULL){ + if (action == SCHEMA_ACTION_CREATE_STABLE){ pReq.colVer = 1; pReq.tagVer = 1; pReq.suid = 0; pReq.source = TD_REQ_FROM_APP; - } else if (action == SCHEMA_ACTION_TAG){ + } else if (action == SCHEMA_ACTION_ADD_TAG || action == SCHEMA_ACTION_CHANGE_TAG_SIZE){ pReq.colVer = pTableMeta->sversion; pReq.tagVer = pTableMeta->tversion + 1; pReq.suid = pTableMeta->uid; pReq.source = TD_REQ_FROM_TAOX; - } else if (action == SCHEMA_ACTION_COLUMN){ + } else if (action == SCHEMA_ACTION_ADD_COLUMN || action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE){ pReq.colVer = pTableMeta->sversion + 1; pReq.tagVer = pTableMeta->tversion; pReq.suid = pTableMeta->uid; pReq.source = TD_REQ_FROM_TAOX; } + if (pReq.numOfTags == 0){ + pReq.numOfTags = 1; + SField field = {0}; + field.type = TSDB_DATA_TYPE_NCHAR; + field.bytes = 1; + strcpy(field.name, tsSmlTagName); + taosArrayPush(pReq.pTags, &field); + } + pReq.commentLen = -1; pReq.igExists = true; tNameExtractFullName(pName, pReq.name); - if(action == SCHEMA_ACTION_NULL || action == SCHEMA_ACTION_COLUMN){ - pReq.numOfColumns = taosArrayGetSize(sTableData->cols); - pReq.pColumns = taosArrayInit(pReq.numOfColumns, sizeof(SField)); - for (int i = 0; i < pReq.numOfColumns; i++) { - SSmlKv *kv = (SSmlKv *)taosArrayGetP(sTableData->cols, i); - SField field = {0}; - field.type = kv->type; - field.bytes = getBytes(kv->type, kv->length); - memcpy(field.name, kv->key, kv->keyLen); - taosArrayPush(pReq.pColumns, &field); - } - }else if (action == SCHEMA_ACTION_TAG){ - pReq.numOfColumns = pTableMeta->tableInfo.numOfColumns; - pReq.pColumns = taosArrayInit(pReq.numOfColumns, sizeof(SField)); - for (int i = 0; i < pReq.numOfColumns; i++) { - SSchema *s = &pTableMeta->schema[i]; - SField field = {0}; - field.type = s->type; - field.bytes = s->bytes; - strcpy(field.name, s->name); - taosArrayPush(pReq.pColumns, &field); - } - } - - if(action == SCHEMA_ACTION_NULL || action == SCHEMA_ACTION_TAG){ - pReq.numOfTags = taosArrayGetSize(sTableData->tags); - if (pReq.numOfTags == 0){ - pReq.numOfTags = 1; - pReq.pTags = taosArrayInit(pReq.numOfTags, sizeof(SField)); - SField field = {0}; - field.type = TSDB_DATA_TYPE_NCHAR; - field.bytes = 1; - strcpy(field.name, tsSmlTagName); - taosArrayPush(pReq.pTags, &field); - }else{ - pReq.pTags = taosArrayInit(pReq.numOfTags, sizeof(SField)); - for (int i = 0; i < pReq.numOfTags; i++) { - SSmlKv *kv = (SSmlKv *)taosArrayGetP(sTableData->tags, i); - SField field = {0}; - field.type = kv->type; - field.bytes = getBytes(kv->type, kv->length); - memcpy(field.name, kv->key, kv->keyLen); - taosArrayPush(pReq.pTags, &field); - } - } - }else if (action == SCHEMA_ACTION_COLUMN){ - pReq.numOfTags = pTableMeta->tableInfo.numOfTags; - pReq.pTags = taosArrayInit(pReq.numOfTags, sizeof(SField)); - for (int i = 0; i < pReq.numOfTags; i++) { - SSchema *s = &pTableMeta->schema[i + pTableMeta->tableInfo.numOfColumns]; - SField field = {0}; - field.type = s->type; - field.bytes = s->bytes; - strcpy(field.name, s->name); - taosArrayPush(pReq.pTags, &field); - } - } - pCmdMsg.epSet = getEpSet_s(&info->taos->pAppInfo->mgmtEp); pCmdMsg.msgType = TDMT_MND_CREATE_STB; pCmdMsg.msgLen = tSerializeSMCreateStbReq(NULL, 0, &pReq); pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen); if (NULL == pCmdMsg.pMsg) { - tFreeSMCreateStbReq(&pReq); code = TSDB_CODE_OUT_OF_MEMORY; goto end; } @@ -442,7 +423,10 @@ end: } static int32_t smlModifyDBSchemas(SSmlHandle *info) { - int32_t code = 0; + int32_t code = 0; + SHashObj *hashTmp = NULL; + STableMeta *pTableMeta = NULL; + SName pName = {TSDB_TABLE_NAME_T, info->taos->acctId, {0}, {0}}; strcpy(pName.dbname, info->pRequest->pDb); @@ -455,7 +439,6 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { SSmlSTableMeta **tableMetaSml = (SSmlSTableMeta **)taosHashIterate(info->superTables, NULL); while (tableMetaSml) { SSmlSTableMeta *sTableData = *tableMetaSml; - STableMeta *pTableMeta = NULL; bool needCheckMeta = false; // for multi thread size_t superTableLen = 0; @@ -466,14 +449,19 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta); if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST || code == TSDB_CODE_MND_STB_NOT_EXIST) { - code = smlSendMetaMsg(info, &pName, sTableData, NULL, SCHEMA_ACTION_NULL); + SArray* pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols), sizeof(SField)); + SArray* pTags = taosArrayInit(taosArrayGetSize(sTableData->tags), sizeof(SField)); + smlBuildFieldsList(info, NULL, NULL, sTableData->tags, pTags, 0, true); + smlBuildFieldsList(info, NULL, NULL, sTableData->cols, pColumns, 0, false); + + code = smlSendMetaMsg(info, &pName, pColumns, pTags, NULL, SCHEMA_ACTION_CREATE_STABLE); if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable); + uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname); goto end; } info->cost.numOfCreateSTables++; } else if (code == TSDB_CODE_SUCCESS) { - SHashObj *hashTmp = taosHashInit(pTableMeta->tableInfo.numOfTags, + hashTmp = taosHashInit(pTableMeta->tableInfo.numOfTags, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); for (uint16_t i = pTableMeta->tableInfo.numOfColumns; i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) { @@ -483,34 +471,70 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { ESchemaAction action = SCHEMA_ACTION_NULL; code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->tags, &action, true); if (code != TSDB_CODE_SUCCESS) { - taosHashCleanup(hashTmp); goto end; } - if (action == SCHEMA_ACTION_TAG){ - code = smlSendMetaMsg(info, &pName, sTableData, pTableMeta, action); + if (action != SCHEMA_ACTION_NULL){ + SArray* pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols) + pTableMeta->tableInfo.numOfColumns, sizeof(SField)); + SArray* pTags = taosArrayInit(taosArrayGetSize(sTableData->tags) + pTableMeta->tableInfo.numOfTags, sizeof(SField)); + + for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) { + SField field = {0}; + field.type = pTableMeta->schema[i].type; + field.bytes = pTableMeta->schema[i].bytes; + strcpy(field.name, pTableMeta->schema[i].name); + if(i < pTableMeta->tableInfo.numOfColumns){ + taosArrayPush(pColumns, &field); + }else{ + taosArrayPush(pTags, &field); + } + } + smlBuildFieldsList(info, pTableMeta->schema, hashTmp, sTableData->tags, pTags, pTableMeta->tableInfo.numOfColumns, true); + + code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action); if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable); + uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname); goto end; } } + taosMemoryFreeClear(pTableMeta); code = catalogRefreshTableMeta(info->pCatalog, &conn, &pName, -1); if (code != TSDB_CODE_SUCCESS) { goto end; } + code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } taosHashClear(hashTmp); - for (uint16_t i = 1; i < pTableMeta->tableInfo.numOfColumns; i++) { + for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) { taosHashPut(hashTmp, pTableMeta->schema[i].name, strlen(pTableMeta->schema[i].name), &i, SHORT_BYTES); } action = SCHEMA_ACTION_NULL; code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->cols, &action, false); - taosHashCleanup(hashTmp); if (code != TSDB_CODE_SUCCESS) { goto end; } - if (action == SCHEMA_ACTION_COLUMN){ - code = smlSendMetaMsg(info, &pName, sTableData, pTableMeta, action); + if (action != SCHEMA_ACTION_NULL){ + SArray* pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols) + pTableMeta->tableInfo.numOfColumns, sizeof(SField)); + SArray* pTags = taosArrayInit(taosArrayGetSize(sTableData->tags) + pTableMeta->tableInfo.numOfTags, sizeof(SField)); + + for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) { + SField field = {0}; + field.type = pTableMeta->schema[i].type; + field.bytes = pTableMeta->schema[i].bytes; + strcpy(field.name, pTableMeta->schema[i].name); + if(i < pTableMeta->tableInfo.numOfColumns){ + taosArrayPush(pColumns, &field); + }else{ + taosArrayPush(pTags, &field); + } + } + + smlBuildFieldsList(info, pTableMeta->schema, hashTmp, sTableData->cols, pColumns, pTableMeta->tableInfo.numOfColumns, false); + + code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action); if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable); goto end; @@ -526,7 +550,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { uError("SML:0x%" PRIx64 " load table meta error: %s", info->id, tstrerror(code)); goto end; } - if (pTableMeta) taosMemoryFree(pTableMeta); + taosMemoryFreeClear(pTableMeta); code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta); if (code != TSDB_CODE_SUCCESS) { @@ -551,10 +575,13 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { sTableData->tableMeta = pTableMeta; tableMetaSml = (SSmlSTableMeta **)taosHashIterate(info->superTables, tableMetaSml); + taosHashCleanup(hashTmp); } return 0; end: + taosHashCleanup(hashTmp); + taosMemoryFreeClear(pTableMeta); catalogRefreshTableMeta(info->pCatalog, &conn, &pName, 1); return code; } @@ -2057,10 +2084,6 @@ static int32_t smlParseInfluxLine(SSmlHandle *info, const char *sql) { if (info->dataFormat) taosArrayDestroy(cols); return ret; } - if (taosArrayGetSize(cols) > TSDB_MAX_COLUMNS) { - smlBuildInvalidDataMsg(&info->msgBuf, "too many columns than 4096", NULL); - return TSDB_CODE_PAR_TOO_MANY_COLUMNS; - } bool hasTable = true; SSmlTableInfo *tinfo = NULL; @@ -2094,6 +2117,11 @@ static int32_t smlParseInfluxLine(SSmlHandle *info, const char *sql) { return TSDB_CODE_PAR_INVALID_TAGS_NUM; } + if (taosArrayGetSize(cols) + taosArrayGetSize((*oneTable)->tags) > TSDB_MAX_COLUMNS) { + smlBuildInvalidDataMsg(&info->msgBuf, "too many columns than 4096", NULL); + return TSDB_CODE_PAR_TOO_MANY_COLUMNS; + } + (*oneTable)->sTableName = elements.measure; (*oneTable)->sTableNameLen = elements.measureLen; if (strlen((*oneTable)->childTableName) == 0) { diff --git a/source/client/src/taosx.c b/source/client/src/taosx.c new file mode 100644 index 0000000000000000000000000000000000000000..f016120a1f5cbe1b4baf4e42f76a0fea7c80d353 --- /dev/null +++ b/source/client/src/taosx.c @@ -0,0 +1,1674 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "cJSON.h" +#include "clientInt.h" +#include "clientLog.h" +#include "parser.h" +#include "tdatablock.h" +#include "tdef.h" +#include "tglobal.h" +#include "tmsgtype.h" +#include "tqueue.h" +#include "tref.h" +#include "ttimer.h" + +static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id, + int8_t t) { + char* string = NULL; + cJSON* json = cJSON_CreateObject(); + if (json == NULL) { + return string; + } + cJSON* type = cJSON_CreateString("create"); + cJSON_AddItemToObject(json, "type", type); + + // char uid[32] = {0}; + // sprintf(uid, "%"PRIi64, id); + // cJSON* id_ = cJSON_CreateString(uid); + // cJSON_AddItemToObject(json, "id", id_); + cJSON* tableName = cJSON_CreateString(name); + cJSON_AddItemToObject(json, "tableName", tableName); + cJSON* tableType = cJSON_CreateString(t == TSDB_NORMAL_TABLE ? "normal" : "super"); + cJSON_AddItemToObject(json, "tableType", tableType); + // cJSON* version = cJSON_CreateNumber(1); + // cJSON_AddItemToObject(json, "version", version); + + cJSON* columns = cJSON_CreateArray(); + for (int i = 0; i < schemaRow->nCols; i++) { + cJSON* column = cJSON_CreateObject(); + SSchema* s = schemaRow->pSchema + i; + cJSON* cname = cJSON_CreateString(s->name); + cJSON_AddItemToObject(column, "name", cname); + cJSON* ctype = cJSON_CreateNumber(s->type); + cJSON_AddItemToObject(column, "type", ctype); + if (s->type == TSDB_DATA_TYPE_BINARY) { + int32_t length = s->bytes - VARSTR_HEADER_SIZE; + cJSON* cbytes = cJSON_CreateNumber(length); + cJSON_AddItemToObject(column, "length", cbytes); + } else if (s->type == TSDB_DATA_TYPE_NCHAR) { + int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; + cJSON* cbytes = cJSON_CreateNumber(length); + cJSON_AddItemToObject(column, "length", cbytes); + } + cJSON_AddItemToArray(columns, column); + } + cJSON_AddItemToObject(json, "columns", columns); + + cJSON* tags = cJSON_CreateArray(); + for (int i = 0; schemaTag && i < schemaTag->nCols; i++) { + cJSON* tag = cJSON_CreateObject(); + SSchema* s = schemaTag->pSchema + i; + cJSON* tname = cJSON_CreateString(s->name); + cJSON_AddItemToObject(tag, "name", tname); + cJSON* ttype = cJSON_CreateNumber(s->type); + cJSON_AddItemToObject(tag, "type", ttype); + if (s->type == TSDB_DATA_TYPE_BINARY) { + int32_t length = s->bytes - VARSTR_HEADER_SIZE; + cJSON* cbytes = cJSON_CreateNumber(length); + cJSON_AddItemToObject(tag, "length", cbytes); + } else if (s->type == TSDB_DATA_TYPE_NCHAR) { + int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; + cJSON* cbytes = cJSON_CreateNumber(length); + cJSON_AddItemToObject(tag, "length", cbytes); + } + cJSON_AddItemToArray(tags, tag); + } + cJSON_AddItemToObject(json, "tags", tags); + + string = cJSON_PrintUnformatted(json); + cJSON_Delete(json); + return string; +} + +static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) { + SMAlterStbReq req = {0}; + cJSON* json = NULL; + char* string = NULL; + + if (tDeserializeSMAlterStbReq(alterData, alterDataLen, &req) != 0) { + goto end; + } + + json = cJSON_CreateObject(); + if (json == NULL) { + goto end; + } + cJSON* type = cJSON_CreateString("alter"); + cJSON_AddItemToObject(json, "type", type); + // cJSON* uid = cJSON_CreateNumber(id); + // cJSON_AddItemToObject(json, "uid", uid); + SName name = {0}; + tNameFromString(&name, req.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + cJSON* tableName = cJSON_CreateString(name.tname); + cJSON_AddItemToObject(json, "tableName", tableName); + cJSON* tableType = cJSON_CreateString("super"); + cJSON_AddItemToObject(json, "tableType", tableType); + + cJSON* alterType = cJSON_CreateNumber(req.alterType); + cJSON_AddItemToObject(json, "alterType", alterType); + switch (req.alterType) { + case TSDB_ALTER_TABLE_ADD_TAG: + case TSDB_ALTER_TABLE_ADD_COLUMN: { + TAOS_FIELD* field = taosArrayGet(req.pFields, 0); + cJSON* colName = cJSON_CreateString(field->name); + cJSON_AddItemToObject(json, "colName", colName); + cJSON* colType = cJSON_CreateNumber(field->type); + cJSON_AddItemToObject(json, "colType", colType); + + if (field->type == TSDB_DATA_TYPE_BINARY) { + int32_t length = field->bytes - VARSTR_HEADER_SIZE; + cJSON* cbytes = cJSON_CreateNumber(length); + cJSON_AddItemToObject(json, "colLength", cbytes); + } else if (field->type == TSDB_DATA_TYPE_NCHAR) { + int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; + cJSON* cbytes = cJSON_CreateNumber(length); + cJSON_AddItemToObject(json, "colLength", cbytes); + } + break; + } + case TSDB_ALTER_TABLE_DROP_TAG: + case TSDB_ALTER_TABLE_DROP_COLUMN: { + TAOS_FIELD* field = taosArrayGet(req.pFields, 0); + cJSON* colName = cJSON_CreateString(field->name); + cJSON_AddItemToObject(json, "colName", colName); + break; + } + case TSDB_ALTER_TABLE_UPDATE_TAG_BYTES: + case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: { + TAOS_FIELD* field = taosArrayGet(req.pFields, 0); + cJSON* colName = cJSON_CreateString(field->name); + cJSON_AddItemToObject(json, "colName", colName); + cJSON* colType = cJSON_CreateNumber(field->type); + cJSON_AddItemToObject(json, "colType", colType); + if (field->type == TSDB_DATA_TYPE_BINARY) { + int32_t length = field->bytes - VARSTR_HEADER_SIZE; + cJSON* cbytes = cJSON_CreateNumber(length); + cJSON_AddItemToObject(json, "colLength", cbytes); + } else if (field->type == TSDB_DATA_TYPE_NCHAR) { + int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; + cJSON* cbytes = cJSON_CreateNumber(length); + cJSON_AddItemToObject(json, "colLength", cbytes); + } + break; + } + case TSDB_ALTER_TABLE_UPDATE_TAG_NAME: + case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: { + TAOS_FIELD* oldField = taosArrayGet(req.pFields, 0); + TAOS_FIELD* newField = taosArrayGet(req.pFields, 1); + cJSON* colName = cJSON_CreateString(oldField->name); + cJSON_AddItemToObject(json, "colName", colName); + cJSON* colNewName = cJSON_CreateString(newField->name); + cJSON_AddItemToObject(json, "colNewName", colNewName); + break; + } + default: + break; + } + string = cJSON_PrintUnformatted(json); + +end: + cJSON_Delete(json); + tFreeSMAltertbReq(&req); + return string; +} + +static char* processCreateStb(SMqMetaRsp* metaRsp) { + SVCreateStbReq req = {0}; + SDecoder coder; + char* string = NULL; + + // decode and process req + void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead)); + int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead); + tDecoderInit(&coder, data, len); + + if (tDecodeSVCreateStbReq(&coder, &req) < 0) { + goto _err; + } + string = buildCreateTableJson(&req.schemaRow, &req.schemaTag, req.name, req.suid, TSDB_SUPER_TABLE); + tDecoderClear(&coder); + return string; + +_err: + tDecoderClear(&coder); + return string; +} + +static char* processAlterStb(SMqMetaRsp* metaRsp) { + SVCreateStbReq req = {0}; + SDecoder coder; + char* string = NULL; + + // decode and process req + void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead)); + int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead); + tDecoderInit(&coder, data, len); + + if (tDecodeSVCreateStbReq(&coder, &req) < 0) { + goto _err; + } + string = buildAlterSTableJson(req.alterOriData, req.alterOriDataLen); + tDecoderClear(&coder); + return string; + +_err: + tDecoderClear(&coder); + return string; +} + +static char* buildCreateCTableJson(STag* pTag, char* sname, char* name, SArray* tagName, int64_t id, uint8_t tagNum) { + char* string = NULL; + SArray* pTagVals = NULL; + cJSON* json = cJSON_CreateObject(); + if (json == NULL) { + return string; + } + cJSON* type = cJSON_CreateString("create"); + cJSON_AddItemToObject(json, "type", type); + // char cid[32] = {0}; + // sprintf(cid, "%"PRIi64, id); + // cJSON* cid_ = cJSON_CreateString(cid); + // cJSON_AddItemToObject(json, "id", cid_); + + cJSON* tableName = cJSON_CreateString(name); + cJSON_AddItemToObject(json, "tableName", tableName); + cJSON* tableType = cJSON_CreateString("child"); + cJSON_AddItemToObject(json, "tableType", tableType); + cJSON* using = cJSON_CreateString(sname); + cJSON_AddItemToObject(json, "using", using); + cJSON* tagNumJson = cJSON_CreateNumber(tagNum); + cJSON_AddItemToObject(json, "tagNum", tagNumJson); + // cJSON* version = cJSON_CreateNumber(1); + // cJSON_AddItemToObject(json, "version", version); + + cJSON* tags = cJSON_CreateArray(); + int32_t code = tTagToValArray(pTag, &pTagVals); + if (code) { + goto end; + } + + if (tTagIsJson(pTag)) { + STag* p = (STag*)pTag; + if (p->nTag == 0) { + goto end; + } + char* pJson = parseTagDatatoJson(pTag); + cJSON* tag = cJSON_CreateObject(); + STagVal* pTagVal = taosArrayGet(pTagVals, 0); + + char* ptname = taosArrayGet(tagName, 0); + cJSON* tname = cJSON_CreateString(ptname); + cJSON_AddItemToObject(tag, "name", tname); + // cJSON* cid_ = cJSON_CreateString(""); + // cJSON_AddItemToObject(tag, "cid", cid_); + cJSON* ttype = cJSON_CreateNumber(TSDB_DATA_TYPE_JSON); + cJSON_AddItemToObject(tag, "type", ttype); + cJSON* tvalue = cJSON_CreateString(pJson); + cJSON_AddItemToObject(tag, "value", tvalue); + cJSON_AddItemToArray(tags, tag); + taosMemoryFree(pJson); + goto end; + } + + for (int i = 0; i < taosArrayGetSize(pTagVals); i++) { + STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, i); + + cJSON* tag = cJSON_CreateObject(); + + char* ptname = taosArrayGet(tagName, i); + cJSON* tname = cJSON_CreateString(ptname); + cJSON_AddItemToObject(tag, "name", tname); + // cJSON* cid = cJSON_CreateNumber(pTagVal->cid); + // cJSON_AddItemToObject(tag, "cid", cid); + cJSON* ttype = cJSON_CreateNumber(pTagVal->type); + cJSON_AddItemToObject(tag, "type", ttype); + + cJSON* tvalue = NULL; + if (IS_VAR_DATA_TYPE(pTagVal->type)) { + char* buf = taosMemoryCalloc(pTagVal->nData + 3, 1); + if (!buf) goto end; + dataConverToStr(buf, pTagVal->type, pTagVal->pData, pTagVal->nData, NULL); + tvalue = cJSON_CreateString(buf); + taosMemoryFree(buf); + } else { + double val = 0; + GET_TYPED_DATA(val, double, pTagVal->type, &pTagVal->i64); + tvalue = cJSON_CreateNumber(val); + } + + cJSON_AddItemToObject(tag, "value", tvalue); + cJSON_AddItemToArray(tags, tag); + } + +end: + cJSON_AddItemToObject(json, "tags", tags); + string = cJSON_PrintUnformatted(json); + cJSON_Delete(json); + taosArrayDestroy(pTagVals); + return string; +} + +static char* processCreateTable(SMqMetaRsp* metaRsp) { + SDecoder decoder = {0}; + SVCreateTbBatchReq req = {0}; + SVCreateTbReq* pCreateReq; + char* string = NULL; + // decode + void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead)); + int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead); + tDecoderInit(&decoder, data, len); + if (tDecodeSVCreateTbBatchReq(&decoder, &req) < 0) { + goto _exit; + } + + // loop to create table + for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { + pCreateReq = req.pReqs + iReq; + if (pCreateReq->type == TSDB_CHILD_TABLE) { + string = buildCreateCTableJson((STag*)pCreateReq->ctb.pTag, pCreateReq->ctb.name, pCreateReq->name, + pCreateReq->ctb.tagName, pCreateReq->uid, pCreateReq->ctb.tagNum); + } else if (pCreateReq->type == TSDB_NORMAL_TABLE) { + string = + buildCreateTableJson(&pCreateReq->ntb.schemaRow, NULL, pCreateReq->name, pCreateReq->uid, TSDB_NORMAL_TABLE); + } + } + + tDecoderClear(&decoder); + +_exit: + tDecoderClear(&decoder); + return string; +} + +static char* processAlterTable(SMqMetaRsp* metaRsp) { + SDecoder decoder = {0}; + SVAlterTbReq vAlterTbReq = {0}; + char* string = NULL; + + // decode + void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead)); + int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead); + tDecoderInit(&decoder, data, len); + if (tDecodeSVAlterTbReq(&decoder, &vAlterTbReq) < 0) { + goto _exit; + } + + cJSON* json = cJSON_CreateObject(); + if (json == NULL) { + goto _exit; + } + cJSON* type = cJSON_CreateString("alter"); + cJSON_AddItemToObject(json, "type", type); + // cJSON* uid = cJSON_CreateNumber(id); + // cJSON_AddItemToObject(json, "uid", uid); + cJSON* tableName = cJSON_CreateString(vAlterTbReq.tbName); + cJSON_AddItemToObject(json, "tableName", tableName); + cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL ? "child" : "normal"); + cJSON_AddItemToObject(json, "tableType", tableType); + cJSON* alterType = cJSON_CreateNumber(vAlterTbReq.action); + cJSON_AddItemToObject(json, "alterType", alterType); + + switch (vAlterTbReq.action) { + case TSDB_ALTER_TABLE_ADD_COLUMN: { + cJSON* colName = cJSON_CreateString(vAlterTbReq.colName); + cJSON_AddItemToObject(json, "colName", colName); + cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type); + cJSON_AddItemToObject(json, "colType", colType); + + if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY) { + int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE; + cJSON* cbytes = cJSON_CreateNumber(length); + cJSON_AddItemToObject(json, "colLength", cbytes); + } else if (vAlterTbReq.type == TSDB_DATA_TYPE_NCHAR) { + int32_t length = (vAlterTbReq.bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; + cJSON* cbytes = cJSON_CreateNumber(length); + cJSON_AddItemToObject(json, "colLength", cbytes); + } + break; + } + case TSDB_ALTER_TABLE_DROP_COLUMN: { + cJSON* colName = cJSON_CreateString(vAlterTbReq.colName); + cJSON_AddItemToObject(json, "colName", colName); + break; + } + case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: { + cJSON* colName = cJSON_CreateString(vAlterTbReq.colName); + cJSON_AddItemToObject(json, "colName", colName); + cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType); + cJSON_AddItemToObject(json, "colType", colType); + if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY) { + int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE; + cJSON* cbytes = cJSON_CreateNumber(length); + cJSON_AddItemToObject(json, "colLength", cbytes); + } else if (vAlterTbReq.colModType == TSDB_DATA_TYPE_NCHAR) { + int32_t length = (vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; + cJSON* cbytes = cJSON_CreateNumber(length); + cJSON_AddItemToObject(json, "colLength", cbytes); + } + break; + } + case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: { + cJSON* colName = cJSON_CreateString(vAlterTbReq.colName); + cJSON_AddItemToObject(json, "colName", colName); + cJSON* colNewName = cJSON_CreateString(vAlterTbReq.colNewName); + cJSON_AddItemToObject(json, "colNewName", colNewName); + break; + } + case TSDB_ALTER_TABLE_UPDATE_TAG_VAL: { + cJSON* tagName = cJSON_CreateString(vAlterTbReq.tagName); + cJSON_AddItemToObject(json, "colName", tagName); + + bool isNull = vAlterTbReq.isNull; + if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) { + STag* jsonTag = (STag*)vAlterTbReq.pTagVal; + if (jsonTag->nTag == 0) isNull = true; + } + if (!isNull) { + char* buf = NULL; + + if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) { + ASSERT(tTagIsJson(vAlterTbReq.pTagVal) == true); + buf = parseTagDatatoJson(vAlterTbReq.pTagVal); + } else { + buf = taosMemoryCalloc(vAlterTbReq.nTagVal + 1, 1); + dataConverToStr(buf, vAlterTbReq.tagType, vAlterTbReq.pTagVal, vAlterTbReq.nTagVal, NULL); + } + + cJSON* colValue = cJSON_CreateString(buf); + cJSON_AddItemToObject(json, "colValue", colValue); + taosMemoryFree(buf); + } + + cJSON* isNullCJson = cJSON_CreateBool(isNull); + cJSON_AddItemToObject(json, "colValueNull", isNullCJson); + break; + } + default: + break; + } + string = cJSON_PrintUnformatted(json); + +_exit: + tDecoderClear(&decoder); + return string; +} + +static char* processDropSTable(SMqMetaRsp* metaRsp) { + SDecoder decoder = {0}; + SVDropStbReq req = {0}; + char* string = NULL; + + // decode + void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead)); + int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead); + tDecoderInit(&decoder, data, len); + if (tDecodeSVDropStbReq(&decoder, &req) < 0) { + goto _exit; + } + + cJSON* json = cJSON_CreateObject(); + if (json == NULL) { + goto _exit; + } + cJSON* type = cJSON_CreateString("drop"); + cJSON_AddItemToObject(json, "type", type); + cJSON* tableName = cJSON_CreateString(req.name); + cJSON_AddItemToObject(json, "tableName", tableName); + cJSON* tableType = cJSON_CreateString("super"); + cJSON_AddItemToObject(json, "tableType", tableType); + + string = cJSON_PrintUnformatted(json); + +_exit: + tDecoderClear(&decoder); + return string; +} + +static char* processDropTable(SMqMetaRsp* metaRsp) { + SDecoder decoder = {0}; + SVDropTbBatchReq req = {0}; + char* string = NULL; + + // decode + void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead)); + int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead); + tDecoderInit(&decoder, data, len); + if (tDecodeSVDropTbBatchReq(&decoder, &req) < 0) { + goto _exit; + } + + cJSON* json = cJSON_CreateObject(); + if (json == NULL) { + goto _exit; + } + cJSON* type = cJSON_CreateString("drop"); + cJSON_AddItemToObject(json, "type", type); + // cJSON* uid = cJSON_CreateNumber(id); + // cJSON_AddItemToObject(json, "uid", uid); + // cJSON* tableType = cJSON_CreateString("normal"); + // cJSON_AddItemToObject(json, "tableType", tableType); + + cJSON* tableNameList = cJSON_CreateArray(); + for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { + SVDropTbReq* pDropTbReq = req.pReqs + iReq; + + cJSON* tableName = cJSON_CreateString(pDropTbReq->name); + cJSON_AddItemToArray(tableNameList, tableName); + } + cJSON_AddItemToObject(json, "tableNameList", tableNameList); + + string = cJSON_PrintUnformatted(json); + +_exit: + tDecoderClear(&decoder); + return string; +} + +static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) { + SVCreateStbReq req = {0}; + SDecoder coder; + SMCreateStbReq pReq = {0}; + int32_t code = TSDB_CODE_SUCCESS; + SRequestObj* pRequest = NULL; + + code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + if (!pRequest->pDb) { + code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; + goto end; + } + // decode and process req + void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); + int32_t len = metaLen - sizeof(SMsgHead); + tDecoderInit(&coder, data, len); + if (tDecodeSVCreateStbReq(&coder, &req) < 0) { + code = TSDB_CODE_INVALID_PARA; + goto end; + } + // build create stable + pReq.pColumns = taosArrayInit(req.schemaRow.nCols, sizeof(SField)); + for (int32_t i = 0; i < req.schemaRow.nCols; i++) { + SSchema* pSchema = req.schemaRow.pSchema + i; + SField field = {.type = pSchema->type, .bytes = pSchema->bytes}; + strcpy(field.name, pSchema->name); + taosArrayPush(pReq.pColumns, &field); + } + pReq.pTags = taosArrayInit(req.schemaTag.nCols, sizeof(SField)); + for (int32_t i = 0; i < req.schemaTag.nCols; i++) { + SSchema* pSchema = req.schemaTag.pSchema + i; + SField field = {.type = pSchema->type, .bytes = pSchema->bytes}; + strcpy(field.name, pSchema->name); + taosArrayPush(pReq.pTags, &field); + } + + pReq.colVer = req.schemaRow.version; + pReq.tagVer = req.schemaTag.version; + pReq.numOfColumns = req.schemaRow.nCols; + pReq.numOfTags = req.schemaTag.nCols; + pReq.commentLen = -1; + pReq.suid = req.suid; + pReq.source = TD_REQ_FROM_TAOX; + pReq.igExists = true; + + STscObj* pTscObj = pRequest->pTscObj; + SName tableName; + tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name); + + SCmdMsgInfo pCmdMsg = {0}; + pCmdMsg.epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp); + pCmdMsg.msgType = TDMT_MND_CREATE_STB; + pCmdMsg.msgLen = tSerializeSMCreateStbReq(NULL, 0, &pReq); + pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen); + if (NULL == pCmdMsg.pMsg) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + tSerializeSMCreateStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq); + + SQuery pQuery = {0}; + pQuery.execMode = QUERY_EXEC_MODE_RPC; + pQuery.pCmdMsg = &pCmdMsg; + pQuery.msgType = pQuery.pCmdMsg->msgType; + pQuery.stableQuery = true; + + launchQueryImpl(pRequest, &pQuery, true, NULL); + + if (pRequest->code == TSDB_CODE_SUCCESS) { + SCatalog* pCatalog = NULL; + catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); + catalogRemoveTableMeta(pCatalog, &tableName); + } + + code = pRequest->code; + taosMemoryFree(pCmdMsg.pMsg); + +end: + destroyRequest(pRequest); + tFreeSMCreateStbReq(&pReq); + tDecoderClear(&coder); + return code; +} + +static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) { + SVDropStbReq req = {0}; + SDecoder coder; + SMDropStbReq pReq = {0}; + int32_t code = TSDB_CODE_SUCCESS; + SRequestObj* pRequest = NULL; + + code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + if (!pRequest->pDb) { + code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; + goto end; + } + // decode and process req + void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); + int32_t len = metaLen - sizeof(SMsgHead); + tDecoderInit(&coder, data, len); + if (tDecodeSVDropStbReq(&coder, &req) < 0) { + code = TSDB_CODE_INVALID_PARA; + goto end; + } + + // build drop stable + pReq.igNotExists = true; + pReq.source = TD_REQ_FROM_TAOX; + pReq.suid = req.suid; + + STscObj* pTscObj = pRequest->pTscObj; + SName tableName = {0}; + tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name); + + SCmdMsgInfo pCmdMsg = {0}; + pCmdMsg.epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp); + pCmdMsg.msgType = TDMT_MND_DROP_STB; + pCmdMsg.msgLen = tSerializeSMDropStbReq(NULL, 0, &pReq); + pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen); + if (NULL == pCmdMsg.pMsg) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + tSerializeSMDropStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq); + + SQuery pQuery = {0}; + pQuery.execMode = QUERY_EXEC_MODE_RPC; + pQuery.pCmdMsg = &pCmdMsg; + pQuery.msgType = pQuery.pCmdMsg->msgType; + pQuery.stableQuery = true; + + launchQueryImpl(pRequest, &pQuery, true, NULL); + + if (pRequest->code == TSDB_CODE_SUCCESS) { + SCatalog* pCatalog = NULL; + catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); + catalogRemoveTableMeta(pCatalog, &tableName); + } + + code = pRequest->code; + taosMemoryFree(pCmdMsg.pMsg); + +end: + destroyRequest(pRequest); + tDecoderClear(&coder); + return code; +} + +typedef struct SVgroupCreateTableBatch { + SVCreateTbBatchReq req; + SVgroupInfo info; + char dbName[TSDB_DB_NAME_LEN]; +} SVgroupCreateTableBatch; + +static void destroyCreateTbReqBatch(void* data) { + SVgroupCreateTableBatch* pTbBatch = (SVgroupCreateTableBatch*)data; + taosArrayDestroy(pTbBatch->req.pArray); +} + +static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { + SVCreateTbBatchReq req = {0}; + SDecoder coder = {0}; + int32_t code = TSDB_CODE_SUCCESS; + SRequestObj* pRequest = NULL; + SQuery* pQuery = NULL; + SHashObj* pVgroupHashmap = NULL; + + code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + if (!pRequest->pDb) { + code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; + goto end; + } + // decode and process req + void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); + int32_t len = metaLen - sizeof(SMsgHead); + tDecoderInit(&coder, data, len); + if (tDecodeSVCreateTbBatchReq(&coder, &req) < 0) { + code = TSDB_CODE_INVALID_PARA; + goto end; + } + + STscObj* pTscObj = pRequest->pTscObj; + + SVCreateTbReq* pCreateReq = NULL; + SCatalog* pCatalog = NULL; + code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); + if (NULL == pVgroupHashmap) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + taosHashSetFreeFp(pVgroupHashmap, destroyCreateTbReqBatch); + + SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; + + pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName)); + // loop to create table + for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { + pCreateReq = req.pReqs + iReq; + + SVgroupInfo pInfo = {0}; + SName pName = {0}; + toName(pTscObj->acctId, pRequest->pDb, pCreateReq->name, &pName); + code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + taosArrayPush(pRequest->tableList, &pName); + + // change tag cid to new cid + if(pCreateReq->type == TSDB_CHILD_TABLE){ + STableMeta* pTableMeta = NULL; + SName sName = {0}; + toName(pTscObj->acctId, pRequest->pDb, pCreateReq->ctb.name, &sName); + code = catalogGetTableMeta(pCatalog, &conn, &sName, &pTableMeta); + if(code != TSDB_CODE_SUCCESS){ + uError("taosCreateTable:catalogGetTableMeta failed. table name: %s", pCreateReq->ctb.name); + goto end; + } + + for(int32_t i = 0; i < taosArrayGetSize(pCreateReq->ctb.tagName); i++){ + char* tName = taosArrayGet(pCreateReq->ctb.tagName, i); + for(int32_t j = pTableMeta->tableInfo.numOfColumns; j < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; j++){ + SSchema *tag = &pTableMeta->schema[j]; + if(strcmp(tag->name, tName) == 0 && tag->type != TSDB_DATA_TYPE_JSON){ + tTagSetCid((STag *)pCreateReq->ctb.pTag, i, tag->colId); + } + } + } + taosMemoryFreeClear(pTableMeta); + } + + SVgroupCreateTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId)); + if (pTableBatch == NULL) { + SVgroupCreateTableBatch tBatch = {0}; + tBatch.info = pInfo; + strcpy(tBatch.dbName, pRequest->pDb); + + tBatch.req.pArray = taosArrayInit(4, sizeof(struct SVCreateTbReq)); + taosArrayPush(tBatch.req.pArray, pCreateReq); + + taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch)); + } else { // add to the correct vgroup + taosArrayPush(pTableBatch->req.pArray, pCreateReq); + } + } + + SArray* pBufArray = serializeVgroupsCreateTableBatch(pVgroupHashmap); + if (NULL == pBufArray) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + + pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY); + pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; + pQuery->msgType = TDMT_VND_CREATE_TABLE; + pQuery->stableQuery = false; + pQuery->pRoot = nodesMakeNode(QUERY_NODE_CREATE_TABLE_STMT); + + code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + launchQueryImpl(pRequest, pQuery, true, NULL); + if (pRequest->code == TSDB_CODE_SUCCESS) { + removeMeta(pTscObj, pRequest->tableList); + } + + code = pRequest->code; + +end: + taosHashCleanup(pVgroupHashmap); + destroyRequest(pRequest); + tDecoderClear(&coder); + qDestroyQuery(pQuery); + return code; +} + +typedef struct SVgroupDropTableBatch { + SVDropTbBatchReq req; + SVgroupInfo info; + char dbName[TSDB_DB_NAME_LEN]; +} SVgroupDropTableBatch; + +static void destroyDropTbReqBatch(void* data) { + SVgroupDropTableBatch* pTbBatch = (SVgroupDropTableBatch*)data; + taosArrayDestroy(pTbBatch->req.pArray); +} + +static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { + SVDropTbBatchReq req = {0}; + SDecoder coder = {0}; + int32_t code = TSDB_CODE_SUCCESS; + SRequestObj* pRequest = NULL; + SQuery* pQuery = NULL; + SHashObj* pVgroupHashmap = NULL; + + code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + if (!pRequest->pDb) { + code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; + goto end; + } + // decode and process req + void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); + int32_t len = metaLen - sizeof(SMsgHead); + tDecoderInit(&coder, data, len); + if (tDecodeSVDropTbBatchReq(&coder, &req) < 0) { + code = TSDB_CODE_INVALID_PARA; + goto end; + } + + STscObj* pTscObj = pRequest->pTscObj; + + SVDropTbReq* pDropReq = NULL; + SCatalog* pCatalog = NULL; + code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); + if (NULL == pVgroupHashmap) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + taosHashSetFreeFp(pVgroupHashmap, destroyDropTbReqBatch); + + SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; + pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName)); + // loop to create table + for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { + pDropReq = req.pReqs + iReq; + pDropReq->igNotExists = true; + + SVgroupInfo pInfo = {0}; + SName pName = {0}; + toName(pTscObj->acctId, pRequest->pDb, pDropReq->name, &pName); + code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + taosArrayPush(pRequest->tableList, &pName); + SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId)); + if (pTableBatch == NULL) { + SVgroupDropTableBatch tBatch = {0}; + tBatch.info = pInfo; + tBatch.req.pArray = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SVDropTbReq)); + taosArrayPush(tBatch.req.pArray, pDropReq); + + taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch)); + } else { // add to the correct vgroup + taosArrayPush(pTableBatch->req.pArray, pDropReq); + } + } + + SArray* pBufArray = serializeVgroupsDropTableBatch(pVgroupHashmap); + if (NULL == pBufArray) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + + pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY); + pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; + pQuery->msgType = TDMT_VND_DROP_TABLE; + pQuery->stableQuery = false; + pQuery->pRoot = nodesMakeNode(QUERY_NODE_DROP_TABLE_STMT); + + code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + launchQueryImpl(pRequest, pQuery, true, NULL); + if (pRequest->code == TSDB_CODE_SUCCESS) { + removeMeta(pTscObj, pRequest->tableList); + } + code = pRequest->code; + +end: + taosHashCleanup(pVgroupHashmap); + destroyRequest(pRequest); + tDecoderClear(&coder); + qDestroyQuery(pQuery); + return code; +} + +// delete from db.tabl where .. -> delete from tabl where .. +// delete from db .tabl where .. -> delete from tabl where .. +// static void getTbName(char *sql){ +// char *ch = sql; +// +// bool inBackQuote = false; +// int8_t dotIndex = 0; +// while(*ch != '\0'){ +// if(!inBackQuote && *ch == '`'){ +// inBackQuote = true; +// ch++; +// continue; +// } +// +// if(inBackQuote && *ch == '`'){ +// inBackQuote = false; +// ch++; +// +// continue; +// } +// +// if(!inBackQuote && *ch == '.'){ +// dotIndex ++; +// if(dotIndex == 2){ +// memmove(sql, ch + 1, strlen(ch + 1) + 1); +// break; +// } +// } +// ch++; +// } +//} + +static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) { + SDeleteRes req = {0}; + SDecoder coder = {0}; + int32_t code = TSDB_CODE_SUCCESS; + + // decode and process req + void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); + int32_t len = metaLen - sizeof(SMsgHead); + tDecoderInit(&coder, data, len); + if (tDecodeDeleteRes(&coder, &req) < 0) { + code = TSDB_CODE_INVALID_PARA; + goto end; + } + + // getTbName(req.tableFName); + char sql[256] = {0}; + sprintf(sql, "delete from `%s` where `%s` >= %" PRId64 " and `%s` <= %" PRId64, req.tableFName, req.tsColName, + req.skey, req.tsColName, req.ekey); + printf("delete sql:%s\n", sql); + + TAOS_RES* res = taos_query(taos, sql); + SRequestObj* pRequest = (SRequestObj*)res; + code = pRequest->code; + if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) { + code = TSDB_CODE_SUCCESS; + } + taos_free_result(res); + +end: + tDecoderClear(&coder); + return code; +} + +static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { + SVAlterTbReq req = {0}; + SDecoder coder = {0}; + int32_t code = TSDB_CODE_SUCCESS; + SRequestObj* pRequest = NULL; + SQuery* pQuery = NULL; + SArray* pArray = NULL; + SVgDataBlocks* pVgData = NULL; + + code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest); + + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + if (!pRequest->pDb) { + code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; + goto end; + } + // decode and process req + void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); + int32_t len = metaLen - sizeof(SMsgHead); + tDecoderInit(&coder, data, len); + if (tDecodeSVAlterTbReq(&coder, &req) < 0) { + code = TSDB_CODE_INVALID_PARA; + goto end; + } + + // do not deal TSDB_ALTER_TABLE_UPDATE_OPTIONS + if (req.action == TSDB_ALTER_TABLE_UPDATE_OPTIONS) { + goto end; + } + + STscObj* pTscObj = pRequest->pTscObj; + SCatalog* pCatalog = NULL; + code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; + + SVgroupInfo pInfo = {0}; + SName pName = {0}; + toName(pTscObj->acctId, pRequest->pDb, req.tbName, &pName); + code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + pArray = taosArrayInit(1, sizeof(void*)); + if (NULL == pArray) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + + pVgData = taosMemoryCalloc(1, sizeof(SVgDataBlocks)); + if (NULL == pVgData) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + pVgData->vg = pInfo; + pVgData->pData = taosMemoryMalloc(metaLen); + if (NULL == pVgData->pData) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + memcpy(pVgData->pData, meta, metaLen); + ((SMsgHead*)pVgData->pData)->vgId = htonl(pInfo.vgId); + pVgData->size = metaLen; + pVgData->numOfTables = 1; + taosArrayPush(pArray, &pVgData); + + pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY); + pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; + pQuery->msgType = TDMT_VND_ALTER_TABLE; + pQuery->stableQuery = false; + pQuery->pRoot = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT); + + code = rewriteToVnodeModifyOpStmt(pQuery, pArray); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + launchQueryImpl(pRequest, pQuery, true, NULL); + + pVgData = NULL; + pArray = NULL; + code = pRequest->code; + if (code == TSDB_CODE_VND_TABLE_NOT_EXIST) { + code = TSDB_CODE_SUCCESS; + } + + if (pRequest->code == TSDB_CODE_SUCCESS) { + SExecResult* pRes = &pRequest->body.resInfo.execRes; + if (pRes->res != NULL) { + code = handleAlterTbExecRes(pRes->res, pCatalog); + } + } +end: + taosArrayDestroy(pArray); + if (pVgData) taosMemoryFreeClear(pVgData->pData); + taosMemoryFreeClear(pVgData); + destroyRequest(pRequest); + tDecoderClear(&coder); + qDestroyQuery(pQuery); + return code; +} + +typedef struct { + SVgroupInfo vg; + void* data; +} VgData; + +static void destroyVgHash(void* data) { + VgData* vgData = (VgData*)data; + taosMemoryFreeClear(vgData->data); +} + +int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname) { + int32_t code = TSDB_CODE_SUCCESS; + STableMeta* pTableMeta = NULL; + SQuery* pQuery = NULL; + + SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT); + if (!pRequest) { + uError("WriteRaw:createRequest error request is null"); + code = terrno; + goto end; + } + + if (!pRequest->pDb) { + uError("WriteRaw:not use db"); + code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; + goto end; + } + + SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; + strcpy(pName.dbname, pRequest->pDb); + strcpy(pName.tname, tbname); + + struct SCatalog* pCatalog = NULL; + code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog); + if (code != TSDB_CODE_SUCCESS) { + uError("WriteRaw: get gatlog error"); + goto end; + } + + SRequestConnInfo conn = {0}; + conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter; + conn.requestId = pRequest->requestId; + conn.requestObjRefId = pRequest->self; + conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); + + SVgroupInfo vgData = {0}; + code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vgData); + if (code != TSDB_CODE_SUCCESS) { + uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbname); + goto end; + } + + code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta); + if (code != TSDB_CODE_SUCCESS) { + uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbname); + goto end; + } + uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid); + uint64_t uid = pTableMeta->uid; + int32_t numOfCols = pTableMeta->tableInfo.numOfColumns; + + uint16_t fLen = 0; + int32_t rowSize = 0; + int16_t nVar = 0; + for (int i = 0; i < numOfCols; i++) { + SSchema* schema = pTableMeta->schema + i; + fLen += TYPE_BYTES[schema->type]; + rowSize += schema->bytes; + if (IS_VAR_DATA_TYPE(schema->type)) { + nVar++; + } + } + + int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) + + (int32_t)TD_BITMAP_BYTES(numOfCols - 1); + int32_t schemaLen = 0; + int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize; + + int32_t totalLen = sizeof(SSubmitReq) + submitLen; + SSubmitReq* subReq = taosMemoryCalloc(1, totalLen); + SSubmitBlk* blk = POINTER_SHIFT(subReq, sizeof(SSubmitReq)); + void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk)); + STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen); + + SRowBuilder rb = {0}; + tdSRowInit(&rb, pTableMeta->sversion); + tdSRowSetTpInfo(&rb, numOfCols, fLen); + int32_t dataLen = 0; + + char* pStart = pData + getVersion1BlockMetaSize(pData, numOfCols); + int32_t* colLength = (int32_t*)pStart; + pStart += sizeof(int32_t) * numOfCols; + + SResultColumn* pCol = taosMemoryCalloc(numOfCols, sizeof(SResultColumn)); + + for (int32_t i = 0; i < numOfCols; ++i) { + if (IS_VAR_DATA_TYPE(pTableMeta->schema[i].type)) { + pCol[i].offset = (int32_t*)pStart; + pStart += rows * sizeof(int32_t); + } else { + pCol[i].nullbitmap = pStart; + pStart += BitmapLen(rows); + } + + pCol[i].pData = pStart; + pStart += colLength[i]; + } + + for (int32_t j = 0; j < rows; j++) { + tdSRowResetBuf(&rb, rowData); + int32_t offset = 0; + for (int32_t k = 0; k < numOfCols; k++) { + const SSchema* pColumn = &pTableMeta->schema[k]; + + if (IS_VAR_DATA_TYPE(pColumn->type)) { + if (pCol[k].offset[j] != -1) { + char* data = pCol[k].pData + pCol[k].offset[j]; + tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k); + } else { + tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k); + } + } else { + if (!colDataIsNull_f(pCol[k].nullbitmap, j)) { + char* data = pCol[k].pData + pColumn->bytes * j; + tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k); + } else { + tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k); + } + } + + offset += TYPE_BYTES[pColumn->type]; + } + tdSRowEnd(&rb); + int32_t rowLen = TD_ROW_LEN(rowData); + rowData = POINTER_SHIFT(rowData, rowLen); + dataLen += rowLen; + } + + taosMemoryFree(pCol); + + blk->uid = htobe64(uid); + blk->suid = htobe64(suid); + blk->sversion = htonl(pTableMeta->sversion); + blk->schemaLen = htonl(schemaLen); + blk->numOfRows = htonl(rows); + blk->dataLen = htonl(dataLen); + subReq->length = sizeof(SSubmitReq) + sizeof(SSubmitBlk) + schemaLen + dataLen; + subReq->numOfBlocks = 1; + + pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY); + if (NULL == pQuery) { + uError("create SQuery error"); + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; + pQuery->haveResultSet = false; + pQuery->msgType = TDMT_VND_SUBMIT; + pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT); + if (NULL == pQuery->pRoot) { + uError("create pQuery->pRoot error"); + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot); + nodeStmt->payloadType = PAYLOAD_TYPE_KV; + nodeStmt->pDataBlocks = taosArrayInit(1, POINTER_BYTES); + + SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks)); + if (NULL == dst) { + code = TSDB_CODE_TSC_OUT_OF_MEMORY; + goto end; + } + dst->vg = vgData; + dst->numOfTables = subReq->numOfBlocks; + dst->size = subReq->length; + dst->pData = (char*)subReq; + subReq->header.vgId = htonl(dst->vg.vgId); + subReq->version = htonl(1); + subReq->header.contLen = htonl(subReq->length); + subReq->length = htonl(subReq->length); + subReq->numOfBlocks = htonl(subReq->numOfBlocks); + subReq = NULL; // no need free + taosArrayPush(nodeStmt->pDataBlocks, &dst); + + launchQueryImpl(pRequest, pQuery, true, NULL); + code = pRequest->code; + +end: + taosMemoryFreeClear(pTableMeta); + qDestroyQuery(pQuery); + return code; +} + +static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) { + int32_t code = TSDB_CODE_SUCCESS; + SHashObj* pVgHash = NULL; + SQuery* pQuery = NULL; + SMqRspObj rspObj = {0}; + SDecoder decoder = {0}; + STableMeta* pTableMeta = NULL; + + terrno = TSDB_CODE_SUCCESS; + SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT); + if (!pRequest) { + uError("WriteRaw:createRequest error request is null"); + return terrno; + } + + rspObj.resIter = -1; + rspObj.resType = RES_TYPE__TMQ; + + tDecoderInit(&decoder, data, dataLen); + code = tDecodeSMqDataRsp(&decoder, &rspObj.rsp); + if (code != 0) { + uError("WriteRaw:decode smqDataRsp error"); + code = TSDB_CODE_INVALID_MSG; + goto end; + } + + if (!pRequest->pDb) { + uError("WriteRaw:not use db"); + code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; + goto end; + } + + pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + taosHashSetFreeFp(pVgHash, destroyVgHash); + struct SCatalog* pCatalog = NULL; + code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog); + if (code != TSDB_CODE_SUCCESS) { + uError("WriteRaw: get gatlog error"); + goto end; + } + + SRequestConnInfo conn = {0}; + conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter; + conn.requestId = pRequest->requestId; + conn.requestObjRefId = pRequest->self; + conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); + + printf("raw data block num:%d\n", rspObj.rsp.blockNum); + while (++rspObj.resIter < rspObj.rsp.blockNum) { + SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter); + if (!rspObj.rsp.withSchema) { + uError("WriteRaw:no schema, iter:%d", rspObj.resIter); + goto end; + } + SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.rsp.blockSchema, rspObj.resIter); + setResSchemaInfo(&rspObj.resInfo, pSW->pSchema, pSW->nCols); + + code = setQueryResultFromRsp(&rspObj.resInfo, pRetrieve, false, false); + if (code != TSDB_CODE_SUCCESS) { + uError("WriteRaw: setQueryResultFromRsp error"); + goto end; + } + + const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter); + if (!tbName) { + uError("WriteRaw: tbname is null"); + code = TSDB_CODE_TMQ_INVALID_MSG; + goto end; + } + + printf("raw data tbname:%s\n", tbName); + SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; + strcpy(pName.dbname, pRequest->pDb); + strcpy(pName.tname, tbName); + + VgData vgData = {0}; + code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &(vgData.vg)); + if (code != TSDB_CODE_SUCCESS) { + uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName); + goto end; + } + + code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta); + if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST){ + uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName); + code = TSDB_CODE_SUCCESS; + continue; + } + if (code != TSDB_CODE_SUCCESS) { + uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbName); + goto end; + } + + uint16_t fLen = 0; + int32_t rowSize = 0; + int16_t nVar = 0; + for (int i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) { + SSchema* schema = &pTableMeta->schema[i]; + fLen += TYPE_BYTES[schema->type]; + rowSize += schema->bytes; + if (IS_VAR_DATA_TYPE(schema->type)) { + nVar++; + } + } + + int32_t rows = rspObj.resInfo.numOfRows; + int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) + + (int32_t)TD_BITMAP_BYTES(pTableMeta->tableInfo.numOfColumns - 1); + int32_t schemaLen = 0; + int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize; + + SSubmitReq* subReq = NULL; + SSubmitBlk* blk = NULL; + void* hData = taosHashGet(pVgHash, &vgData.vg.vgId, sizeof(vgData.vg.vgId)); + if (hData) { + vgData = *(VgData*)hData; + + int32_t totalLen = ((SSubmitReq*)(vgData.data))->length + submitLen; + void* tmp = taosMemoryRealloc(vgData.data, totalLen); + if (tmp == NULL) { + code = TSDB_CODE_TSC_OUT_OF_MEMORY; + goto end; + } + vgData.data = tmp; + ((VgData*)hData)->data = tmp; + subReq = (SSubmitReq*)(vgData.data); + blk = POINTER_SHIFT(vgData.data, subReq->length); + } else { + int32_t totalLen = sizeof(SSubmitReq) + submitLen; + void* tmp = taosMemoryCalloc(1, totalLen); + if (tmp == NULL) { + code = TSDB_CODE_TSC_OUT_OF_MEMORY; + goto end; + } + vgData.data = tmp; + taosHashPut(pVgHash, (const char*)&vgData.vg.vgId, sizeof(vgData.vg.vgId), (char*)&vgData, sizeof(vgData)); + subReq = (SSubmitReq*)(vgData.data); + subReq->length = sizeof(SSubmitReq); + subReq->numOfBlocks = 0; + + blk = POINTER_SHIFT(vgData.data, sizeof(SSubmitReq)); + } + + // pSW->pSchema should be same as pTableMeta->schema +// ASSERT(pSW->nCols == pTableMeta->tableInfo.numOfColumns); + uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid); + uint64_t uid = pTableMeta->uid; + int16_t sver = pTableMeta->sversion; + + void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk)); + STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen); + + SRowBuilder rb = {0}; + tdSRowInit(&rb, sver); + tdSRowSetTpInfo(&rb, pTableMeta->tableInfo.numOfColumns, fLen); + int32_t totalLen = 0; + + SHashObj* schemaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + for (int i = 0; i < pSW->nCols; i++) { + SSchema* schema = &pSW->pSchema[i]; + taosHashPut(schemaHash, schema->name, strlen(schema->name), &i, sizeof(int32_t)); + } + + for (int32_t j = 0; j < rows; j++) { + tdSRowResetBuf(&rb, rowData); + + doSetOneRowPtr(&rspObj.resInfo); + rspObj.resInfo.current += 1; + + int32_t offset = 0; + for (int32_t k = 0; k < pTableMeta->tableInfo.numOfColumns; k++) { + const SSchema* pColumn = &pTableMeta->schema[k]; + int32_t* index = taosHashGet(schemaHash, pColumn->name, strlen(pColumn->name)); + if(!index){ + tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k); + }else{ + char* colData = rspObj.resInfo.row[*index]; + if (!colData) { + tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k); + } else { + if (IS_VAR_DATA_TYPE(pColumn->type)) { + colData -= VARSTR_HEADER_SIZE; + } + tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, colData, true, offset, k); + } + } + + offset += TYPE_BYTES[pColumn->type]; + } + tdSRowEnd(&rb); + int32_t rowLen = TD_ROW_LEN(rowData); + rowData = POINTER_SHIFT(rowData, rowLen); + totalLen += rowLen; + } + + taosHashCleanup(schemaHash); + blk->uid = htobe64(uid); + blk->suid = htobe64(suid); + blk->sversion = htonl(sver); + blk->schemaLen = htonl(schemaLen); + blk->numOfRows = htonl(rows); + blk->dataLen = htonl(totalLen); + subReq->length += sizeof(SSubmitBlk) + schemaLen + totalLen; + subReq->numOfBlocks++; + taosMemoryFreeClear(pTableMeta); + } + + pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY); + if (NULL == pQuery) { + uError("create SQuery error"); + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; + pQuery->haveResultSet = false; + pQuery->msgType = TDMT_VND_SUBMIT; + pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT); + if (NULL == pQuery->pRoot) { + uError("create pQuery->pRoot error"); + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot); + nodeStmt->payloadType = PAYLOAD_TYPE_KV; + + int32_t numOfVg = taosHashGetSize(pVgHash); + nodeStmt->pDataBlocks = taosArrayInit(numOfVg, POINTER_BYTES); + + VgData* vData = (VgData*)taosHashIterate(pVgHash, NULL); + while (vData) { + SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks)); + if (NULL == dst) { + code = TSDB_CODE_TSC_OUT_OF_MEMORY; + goto end; + } + dst->vg = vData->vg; + SSubmitReq* subReq = (SSubmitReq*)(vData->data); + dst->numOfTables = subReq->numOfBlocks; + dst->size = subReq->length; + dst->pData = (char*)subReq; + vData->data = NULL; // no need free + subReq->header.vgId = htonl(dst->vg.vgId); + subReq->version = htonl(1); + subReq->header.contLen = htonl(subReq->length); + subReq->length = htonl(subReq->length); + subReq->numOfBlocks = htonl(subReq->numOfBlocks); + taosArrayPush(nodeStmt->pDataBlocks, &dst); + vData = (VgData*)taosHashIterate(pVgHash, vData); + } + + launchQueryImpl(pRequest, pQuery, true, NULL); + code = pRequest->code; + +end: + tDecoderClear(&decoder); + qDestroyQuery(pQuery); + destroyRequest(pRequest); + taosHashCleanup(pVgHash); + taosMemoryFreeClear(pTableMeta); + return code; +} + +char* tmq_get_json_meta(TAOS_RES* res) { + if (!TD_RES_TMQ_META(res)) { + return NULL; + } + + SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res; + if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_STB) { + return processCreateStb(&pMetaRspObj->metaRsp); + } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_STB) { + return processAlterStb(&pMetaRspObj->metaRsp); + } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_STB) { + return processDropSTable(&pMetaRspObj->metaRsp); + } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_TABLE) { + return processCreateTable(&pMetaRspObj->metaRsp); + } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_TABLE) { + return processAlterTable(&pMetaRspObj->metaRsp); + } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_TABLE) { + return processDropTable(&pMetaRspObj->metaRsp); + } + return NULL; +} + +void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); } + +int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data* raw) { + if (!raw || !res) { + return TSDB_CODE_INVALID_PARA; + } + if (TD_RES_TMQ_META(res)) { + SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res; + raw->raw = pMetaRspObj->metaRsp.metaRsp; + raw->raw_len = pMetaRspObj->metaRsp.metaRspLen; + raw->raw_type = pMetaRspObj->metaRsp.resMsgType; + } else if (TD_RES_TMQ(res)) { + SMqRspObj* rspObj = ((SMqRspObj*)res); + + int32_t len = 0; + int32_t code = 0; + tEncodeSize(tEncodeSMqDataRsp, &rspObj->rsp, len, code); + if (code < 0) { + return -1; + } + + void* buf = taosMemoryCalloc(1, len); + SEncoder encoder = {0}; + tEncoderInit(&encoder, buf, len); + tEncodeSMqDataRsp(&encoder, &rspObj->rsp); + tEncoderClear(&encoder); + + raw->raw = buf; + raw->raw_len = len; + raw->raw_type = RES_TYPE__TMQ; + } else { + return TSDB_CODE_TMQ_INVALID_MSG; + } + return TSDB_CODE_SUCCESS; +} + +void tmq_free_raw(tmq_raw_data raw) { + if (raw.raw_type == RES_TYPE__TMQ) { + taosMemoryFree(raw.raw); + } +} + +int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) { + if (!taos) { + return TSDB_CODE_INVALID_PARA; + } + + if (raw.raw_type == TDMT_VND_CREATE_STB) { + return taosCreateStb(taos, raw.raw, raw.raw_len); + } else if (raw.raw_type == TDMT_VND_ALTER_STB) { + return taosCreateStb(taos, raw.raw, raw.raw_len); + } else if (raw.raw_type == TDMT_VND_DROP_STB) { + return taosDropStb(taos, raw.raw, raw.raw_len); + } else if (raw.raw_type == TDMT_VND_CREATE_TABLE) { + return taosCreateTable(taos, raw.raw, raw.raw_len); + } else if (raw.raw_type == TDMT_VND_ALTER_TABLE) { + return taosAlterTable(taos, raw.raw, raw.raw_len); + } else if (raw.raw_type == TDMT_VND_DROP_TABLE) { + return taosDropTable(taos, raw.raw, raw.raw_len); + } else if (raw.raw_type == TDMT_VND_DELETE) { + return taosDeleteData(taos, raw.raw, raw.raw_len); + } else if (raw.raw_type == RES_TYPE__TMQ) { + return tmqWriteRaw(taos, raw.raw, raw.raw_len); + } + return TSDB_CODE_INVALID_PARA; +} diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c index ea7f03a4162fe255d00bee2881df5e2064a954eb..f08f54ef4bc06d2f2f137e0609e247da4448d46e 100644 --- a/source/client/src/tmq.c +++ b/source/client/src/tmq.c @@ -28,8 +28,9 @@ int32_t tmqAskEp(tmq_t* tmq, bool async); typedef struct { - int8_t inited; - tmr_h timer; + int8_t inited; + tmr_h timer; + int32_t rsetId; } SMqMgmt; static SMqMgmt tmqMgmt = {0}; @@ -55,8 +56,8 @@ struct tmq_conf_t { int8_t autoCommit; int8_t resetOffset; int8_t withTbName; - int8_t ssEnable; - int32_t ssBatchSize; + int8_t snapEnable; + int32_t snapBatchSize; bool hbBgEnable; @@ -70,6 +71,7 @@ struct tmq_conf_t { }; struct tmq_t { + int64_t refId; // conf char groupId[TSDB_CGROUP_LEN]; char clientId[256]; @@ -146,8 +148,8 @@ typedef struct { typedef struct { // subscribe info - char* topicName; - char db[TSDB_DB_FNAME_LEN]; + char topicName[TSDB_TOPIC_FNAME_LEN]; + char db[TSDB_DB_FNAME_LEN]; SArray* vgs; // SArray @@ -162,33 +164,37 @@ typedef struct { union { SMqDataRsp dataRsp; SMqMetaRsp metaRsp; + STaosxRsp taosxRsp; }; } SMqPollRspWrapper; typedef struct { - tmq_t* tmq; + int64_t refId; + int32_t epoch; tsem_t rspSem; int32_t rspErr; } SMqSubscribeCbParam; typedef struct { - tmq_t* tmq; + int64_t refId; + int32_t epoch; int32_t code; int32_t async; tsem_t rspSem; } SMqAskEpCbParam; typedef struct { - tmq_t* tmq; + int64_t refId; + int32_t epoch; SMqClientVg* pVg; SMqClientTopic* pTopic; - int32_t epoch; int32_t vgId; tsem_t rspSem; } SMqPollCbParam; typedef struct { - tmq_t* tmq; + int64_t refId; + int32_t epoch; int8_t automatic; int8_t async; int32_t waitingRspNum; @@ -282,16 +288,21 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value if (strcmp(key, "experimental.snapshot.enable") == 0) { if (strcmp(value, "true") == 0) { - conf->ssEnable = true; + conf->snapEnable = true; return TMQ_CONF_OK; } else if (strcmp(value, "false") == 0) { - conf->ssEnable = false; + conf->snapEnable = false; return TMQ_CONF_OK; } else { return TMQ_CONF_INVALID; } } + if (strcmp(key, "experimental.snapshot.batch.size") == 0) { + conf->snapBatchSize = atoi(value); + return TMQ_CONF_OK; + } + if (strcmp(key, "enable.heartbeat.background") == 0) { if (strcmp(value, "true") == 0) { conf->hbBgEnable = true; @@ -305,11 +316,6 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value return TMQ_CONF_OK; } - if (strcmp(key, "experimental.snapshot.batch.size") == 0) { - conf->ssBatchSize = atoi(value); - return TMQ_CONF_OK; - } - if (strcmp(key, "td.connect.ip") == 0) { conf->ip = strdup(value); return TMQ_CONF_OK; @@ -369,6 +375,38 @@ static int32_t tmqMakeTopicVgKey(char* dst, const char* topicName, int32_t vg) { return sprintf(dst, "%s:%d", topicName, vg); } +int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet) { + tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, pParamSet->refId); + if (tmq == NULL) { + if (!pParamSet->async) { + tsem_destroy(&pParamSet->rspSem); + } + taosMemoryFree(pParamSet); + terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED; + return -1; + } + + // if no more waiting rsp + if (pParamSet->async) { + // call async cb func + if (pParamSet->automatic && tmq->commitCb) { + tmq->commitCb(tmq, pParamSet->rspErr, tmq->commitCbUserParam); + } else if (!pParamSet->automatic && pParamSet->userCb) { + // sem post + pParamSet->userCb(tmq, pParamSet->rspErr, pParamSet->userParam); + } + taosMemoryFree(pParamSet); + } else { + tsem_post(&pParamSet->rspSem); + } + +#if 0 + taosArrayDestroyP(pParamSet->successfulOffsets, taosMemoryFree); + taosArrayDestroyP(pParamSet->failedOffsets, taosMemoryFree); +#endif + return 0; +} + int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) { SMqCommitCbParam* pParam = (SMqCommitCbParam*)param; SMqCommitCbParamSet* pParamSet = (SMqCommitCbParamSet*)pParam->params; @@ -381,6 +419,9 @@ int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) { } #endif + taosMemoryFree(pParam->pOffset); + if (pBuf->pData) taosMemoryFree(pBuf->pData); + /*tscDebug("receive offset commit cb of %s on vgId:%d, offset is %" PRId64, pParam->pOffset->subKey, pParam->->vgId, * pOffset->version);*/ @@ -389,23 +430,7 @@ int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) { ASSERT(waitingRspNum >= 0); if (waitingRspNum == 0) { - // if no more waiting rsp - if (pParamSet->async) { - // call async cb func - if (pParamSet->automatic && pParamSet->tmq->commitCb) { - pParamSet->tmq->commitCb(pParamSet->tmq, pParamSet->rspErr, pParamSet->tmq->commitCbUserParam); - } else if (!pParamSet->automatic && pParamSet->userCb) { - // sem post - pParamSet->userCb(pParamSet->tmq, pParamSet->rspErr, pParamSet->userParam); - } - } else { - tsem_post(&pParamSet->rspSem); - } - -#if 0 - taosArrayDestroyP(pParamSet->successfulOffsets, taosMemoryFree); - taosArrayDestroyP(pParamSet->failedOffsets, taosMemoryFree); -#endif + tmqCommitDone(pParamSet); } return 0; } @@ -499,7 +524,8 @@ int32_t tmqCommitMsgImpl(tmq_t* tmq, const TAOS_RES* msg, int8_t async, tmq_comm terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } - pParamSet->tmq = tmq; + pParamSet->refId = tmq->refId; + pParamSet->epoch = tmq->epoch; pParamSet->automatic = 0; pParamSet->async = async; pParamSet->userCb = userCb; @@ -560,13 +586,19 @@ int32_t tmqCommitInner(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } - pParamSet->tmq = tmq; + + pParamSet->refId = tmq->refId; + pParamSet->epoch = tmq->epoch; + pParamSet->automatic = automatic; pParamSet->async = async; pParamSet->userCb = userCb; pParamSet->userParam = userParam; tsem_init(&pParamSet->rspSem, 0, 0); + // init as 1 to prevent concurrency issue + pParamSet->waitingRspNum = 1; + for (int32_t i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) { SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i); @@ -595,10 +627,17 @@ int32_t tmqCommitInner(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t return 0; } + int32_t waitingRspNum = atomic_sub_fetch_32(&pParamSet->waitingRspNum, 1); + ASSERT(waitingRspNum >= 0); + if (waitingRspNum == 0) { + tmqCommitDone(pParamSet); + } + if (!async) { tsem_wait(&pParamSet->rspSem); code = pParamSet->rspErr; tsem_destroy(&pParamSet->rspSem); + taosMemoryFree(pParamSet); } else { code = 0; } @@ -622,27 +661,39 @@ int32_t tmqCommitInner(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t } void tmqAssignAskEpTask(void* param, void* tmrId) { - tmq_t* tmq = (tmq_t*)param; - int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM); - *pTaskType = TMQ_DELAYED_TASK__ASK_EP; - taosWriteQitem(tmq->delayedTask, pTaskType); - tsem_post(&tmq->rspSem); + int64_t refId = *(int64_t*)param; + tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); + if (tmq != NULL) { + int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM); + *pTaskType = TMQ_DELAYED_TASK__ASK_EP; + taosWriteQitem(tmq->delayedTask, pTaskType); + tsem_post(&tmq->rspSem); + } + taosMemoryFree(param); } void tmqAssignDelayedCommitTask(void* param, void* tmrId) { - tmq_t* tmq = (tmq_t*)param; - int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM); - *pTaskType = TMQ_DELAYED_TASK__COMMIT; - taosWriteQitem(tmq->delayedTask, pTaskType); - tsem_post(&tmq->rspSem); + int64_t refId = *(int64_t*)param; + tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); + if (tmq != NULL) { + int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM); + *pTaskType = TMQ_DELAYED_TASK__COMMIT; + taosWriteQitem(tmq->delayedTask, pTaskType); + tsem_post(&tmq->rspSem); + } + taosMemoryFree(param); } void tmqAssignDelayedReportTask(void* param, void* tmrId) { - tmq_t* tmq = (tmq_t*)param; - int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM); - *pTaskType = TMQ_DELAYED_TASK__REPORT; - taosWriteQitem(tmq->delayedTask, pTaskType); - tsem_post(&tmq->rspSem); + int64_t refId = *(int64_t*)param; + tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); + if (tmq != NULL) { + int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM); + *pTaskType = TMQ_DELAYED_TASK__REPORT; + taosWriteQitem(tmq->delayedTask, pTaskType); + tsem_post(&tmq->rspSem); + } + taosMemoryFree(param); } int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) { @@ -651,8 +702,11 @@ int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) { } void tmqSendHbReq(void* param, void* tmrId) { - // TODO replace with ref - tmq_t* tmq = (tmq_t*)param; + int64_t refId = *(int64_t*)param; + tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); + if (tmq == NULL) { + return; + } int64_t consumerId = tmq->consumerId; int32_t epoch = tmq->epoch; SMqHbReq* pReq = taosMemoryMalloc(sizeof(SMqHbReq)); @@ -682,7 +736,7 @@ void tmqSendHbReq(void* param, void* tmrId) { asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo); OVER: - taosTmrReset(tmqSendHbReq, 1000, tmq, tmqMgmt.timer, &tmq->hbLiveTimer); + taosTmrReset(tmqSendHbReq, 1000, param, tmqMgmt.timer, &tmq->hbLiveTimer); } int32_t tmqHandleAllDelayedTask(tmq_t* tmq) { @@ -695,10 +749,18 @@ int32_t tmqHandleAllDelayedTask(tmq_t* tmq) { if (*pTaskType == TMQ_DELAYED_TASK__ASK_EP) { tmqAskEp(tmq, true); - taosTmrReset(tmqAssignAskEpTask, 1000, tmq, tmqMgmt.timer, &tmq->epTimer); + + int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t)); + *pRefId = tmq->refId; + + taosTmrReset(tmqAssignAskEpTask, 1000, pRefId, tmqMgmt.timer, &tmq->epTimer); } else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) { tmqCommitInner(tmq, NULL, 1, 1, tmq->commitCb, tmq->commitCbUserParam); - taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, tmq, tmqMgmt.timer, &tmq->commitTimer); + + int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t)); + *pRefId = tmq->refId; + + taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, pRefId, tmqMgmt.timer, &tmq->commitTimer); } else if (*pTaskType == TMQ_DELAYED_TASK__REPORT) { } else { ASSERT(0); @@ -733,7 +795,6 @@ void tmqClearUnhandleMsg(tmq_t* tmq) { int32_t tmqSubscribeCb(void* param, SDataBuf* pMsg, int32_t code) { SMqSubscribeCbParam* pParam = (SMqSubscribeCbParam*)param; pParam->rspErr = code; - /*tmq_t* tmq = pParam->tmq;*/ tsem_post(&pParam->rspSem); return 0; } @@ -750,46 +811,44 @@ int32_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) { } int32_t tmq_unsubscribe(tmq_t* tmq) { + int32_t rsp; + int32_t retryCnt = 0; tmq_list_t* lst = tmq_list_new(); - int32_t rsp = tmq_subscribe(tmq, lst); + while (1) { + rsp = tmq_subscribe(tmq, lst); + if (rsp != TSDB_CODE_MND_CONSUMER_NOT_READY || retryCnt > 5) { + break; + } else { + retryCnt++; + taosMsleep(500); + } + } + tmq_list_destroy(lst); return rsp; } -#if 0 -tmq_t* tmq_consumer_new(void* conn, tmq_conf_t* conf, char* errstr, int32_t errstrLen) { - tmq_t* pTmq = taosMemoryCalloc(sizeof(tmq_t), 1); - if (pTmq == NULL) { - return NULL; - } - pTmq->pTscObj = (STscObj*)conn; - pTmq->status = 0; - pTmq->pollCnt = 0; - pTmq->epoch = 0; - pTmq->epStatus = 0; - pTmq->epSkipCnt = 0; - // set conf - strcpy(pTmq->clientId, conf->clientId); - strcpy(pTmq->groupId, conf->groupId); - pTmq->autoCommit = conf->autoCommit; - pTmq->commit_cb = conf->commit_cb; - pTmq->resetOffsetCfg = conf->resetOffset; +void tmqFreeImpl(void* handle) { + tmq_t* tmq = (tmq_t*)handle; - pTmq->consumerId = generateRequestId() & (((uint64_t)-1) >> 1); - pTmq->clientTopics = taosArrayInit(0, sizeof(SMqClientTopic)); - if (pTmq->clientTopics == NULL) { - taosMemoryFree(pTmq); - return NULL; - } + // TODO stop timer + if (tmq->mqueue) taosCloseQueue(tmq->mqueue); + if (tmq->delayedTask) taosCloseQueue(tmq->delayedTask); + if (tmq->qall) taosFreeQall(tmq->qall); - pTmq->mqueue = taosOpenQueue(); - pTmq->qall = taosAllocateQall(); - - tsem_init(&pTmq->rspSem, 0, 0); + tsem_destroy(&tmq->rspSem); - return pTmq; + int32_t sz = taosArrayGetSize(tmq->clientTopics); + for (int32_t i = 0; i < sz; i++) { + SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i); + if (pTopic->schema.nCols) taosMemoryFree(pTopic->schema.pSchema); + int32_t vgSz = taosArrayGetSize(pTopic->vgs); + taosArrayDestroy(pTopic->vgs); + } + taosArrayDestroy(tmq->clientTopics); + taos_close_internal(tmq->pTscObj); + taosMemoryFree(tmq); } -#endif tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { // init timer @@ -801,6 +860,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } + tmqMgmt.rsetId = taosOpenRef(10000, tmqFreeImpl); } tmq_t* pTmq = taosMemoryCalloc(1, sizeof(tmq_t)); @@ -841,7 +901,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { strcpy(pTmq->clientId, conf->clientId); strcpy(pTmq->groupId, conf->groupId); pTmq->withTbName = conf->withTbName; - pTmq->useSnapshot = conf->ssEnable; + pTmq->useSnapshot = conf->snapEnable; pTmq->autoCommit = conf->autoCommit; pTmq->autoCommitInterval = conf->autoCommitInterval; pTmq->commitCb = conf->commitCb; @@ -869,8 +929,17 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { goto FAIL; } + pTmq->refId = taosAddRef(tmqMgmt.rsetId, pTmq); + if (pTmq->refId < 0) { + tmqFreeImpl(pTmq); + return NULL; + } + + int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t)); + *pRefId = pTmq->refId; + if (pTmq->hbBgEnable) { - pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, 1000, pTmq, tmqMgmt.timer); + pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, 1000, pRefId, tmqMgmt.timer); } tscInfo("consumer %" PRId64 " is setup, consumer group %s", pTmq->consumerId, pTmq->groupId); @@ -928,7 +997,8 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { SMqSubscribeCbParam param = { .rspErr = 0, - .tmq = tmq, + .refId = tmq->refId, + .epoch = tmq->epoch, }; if (tsem_init(¶m.rspSem, 0, 0) != 0) goto FAIL; @@ -970,12 +1040,16 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { // init ep timer if (tmq->epTimer == NULL) { - tmq->epTimer = taosTmrStart(tmqAssignAskEpTask, 1000, tmq, tmqMgmt.timer); + int64_t* pRefId1 = taosMemoryMalloc(sizeof(int64_t)); + *pRefId1 = tmq->refId; + tmq->epTimer = taosTmrStart(tmqAssignAskEpTask, 1000, pRefId1, tmqMgmt.timer); } // init auto commit timer if (tmq->autoCommit && tmq->commitTimer == NULL) { - tmq->commitTimer = taosTmrStart(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, tmq, tmqMgmt.timer); + int64_t* pRefId2 = taosMemoryMalloc(sizeof(int64_t)); + *pRefId2 = tmq->refId; + tmq->commitTimer = taosTmrStart(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, pRefId2, tmqMgmt.timer); } code = 0; @@ -997,9 +1071,18 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { SMqPollCbParam* pParam = (SMqPollCbParam*)param; SMqClientVg* pVg = pParam->pVg; SMqClientTopic* pTopic = pParam->pTopic; - tmq_t* tmq = pParam->tmq; - int32_t vgId = pParam->vgId; - int32_t epoch = pParam->epoch; + + tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, pParam->refId); + if (tmq == NULL) { + tsem_destroy(&pParam->rspSem); + taosMemoryFree(pParam); + taosMemoryFree(pMsg->pData); + terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED; + return -1; + } + + int32_t epoch = pParam->epoch; + int32_t vgId = pParam->vgId; taosMemoryFree(pParam); if (code != 0) { tscWarn("msg discard from vgId:%d, epoch %d, since %s", vgId, epoch, terrstr()); @@ -1059,18 +1142,29 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { tDecodeSMqDataRsp(&decoder, &pRspWrapper->dataRsp); tDecoderClear(&decoder); memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead)); - } else { - ASSERT(rspType == TMQ_MSG_TYPE__POLL_META_RSP); - tDecodeSMqMetaRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &pRspWrapper->metaRsp); + + tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d", + tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version, + rspType); + + } else if (rspType == TMQ_MSG_TYPE__POLL_META_RSP) { + SDecoder decoder; + tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead)); + tDecodeSMqMetaRsp(&decoder, &pRspWrapper->metaRsp); + tDecoderClear(&decoder); memcpy(&pRspWrapper->metaRsp, pMsg->pData, sizeof(SMqRspHead)); + } else if (rspType == TMQ_MSG_TYPE__TAOSX_RSP) { + SDecoder decoder; + tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead)); + tDecodeSTaosxRsp(&decoder, &pRspWrapper->taosxRsp); + tDecoderClear(&decoder); + memcpy(&pRspWrapper->taosxRsp, pMsg->pData, sizeof(SMqRspHead)); + } else { + ASSERT(0); } taosMemoryFree(pMsg->pData); - tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d", - tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version, - rspType); - taosWriteQitem(tmq->mqueue, pRspWrapper); tsem_post(&tmq->rspSem); @@ -1124,7 +1218,7 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) { SMqClientTopic topic = {0}; SMqSubTopicEp* pTopicEp = taosArrayGet(pRsp->topics, i); topic.schema = pTopicEp->schema; - topic.topicName = strdup(pTopicEp->topic); + tstrncpy(topic.topicName, pTopicEp->topic, TSDB_TOPIC_FNAME_LEN); tstrncpy(topic.db, pTopicEp->db, TSDB_DB_FNAME_LEN); tscDebug("consumer:%" PRId64 ", update topic: %s", tmq->consumerId, topic.topicName); @@ -1153,7 +1247,16 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) { } taosArrayPush(newTopics, &topic); } - if (tmq->clientTopics) taosArrayDestroy(tmq->clientTopics); + if (tmq->clientTopics) { + int32_t sz = taosArrayGetSize(tmq->clientTopics); + for (int32_t i = 0; i < sz; i++) { + SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i); + if (pTopic->schema.nCols) taosMemoryFree(pTopic->schema.pSchema); + int32_t vgSz = taosArrayGetSize(pTopic->vgs); + taosArrayDestroy(pTopic->vgs); + } + taosArrayDestroy(tmq->clientTopics); + } taosHashCleanup(pHash); tmq->clientTopics = newTopics; @@ -1168,8 +1271,20 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) { int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) { SMqAskEpCbParam* pParam = (SMqAskEpCbParam*)param; - tmq_t* tmq = pParam->tmq; int8_t async = pParam->async; + tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, pParam->refId); + + if (tmq == NULL) { + if (!async) { + tsem_destroy(&pParam->rspSem); + } else { + taosMemoryFree(pParam); + } + taosMemoryFree(pMsg->pData); + terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED; + return -1; + } + pParam->code = code; if (code != 0) { tscError("consumer:%" PRId64 ", get topic endpoint error, not ready, wait:%d", tmq->consumerId, pParam->async); @@ -1216,6 +1331,7 @@ END: } else { taosMemoryFree(pParam); } + taosMemoryFree(pMsg->pData); return code; } @@ -1248,7 +1364,8 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) { /*atomic_store_8(&tmq->epStatus, 0);*/ return -1; } - pParam->tmq = tmq; + pParam->refId = tmq->refId; + pParam->epoch = tmq->epoch; pParam->async = async; tsem_init(&pParam->rspSem, 0, 0); @@ -1288,31 +1405,6 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) { return code; } -#if 0 -int32_t tmq_seek(tmq_t* tmq, const tmq_topic_vgroup_t* offset) { - const SMqOffset* pOffset = &offset->offset; - if (strcmp(pOffset->cgroup, tmq->groupId) != 0) { - return TMQ_RESP_ERR__FAIL; - } - int32_t sz = taosArrayGetSize(tmq->clientTopics); - for (int32_t i = 0; i < sz; i++) { - SMqClientTopic* clientTopic = taosArrayGet(tmq->clientTopics, i); - if (strcmp(clientTopic->topicName, pOffset->topicName) == 0) { - int32_t vgSz = taosArrayGetSize(clientTopic->vgs); - for (int32_t j = 0; j < vgSz; j++) { - SMqClientVg* pVg = taosArrayGet(clientTopic->vgs, j); - if (pVg->vgId == pOffset->vgId) { - pVg->currentOffset = pOffset->offset; - tmqClearUnhandleMsg(tmq); - return TMQ_RESP_ERR__SUCCESS; - } - } - } - } - return TMQ_RESP_ERR__FAIL; -} -#endif - SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t timeout, SMqClientTopic* pTopic, SMqClientVg* pVg) { SMqPollReq* pReq = taosMemoryCalloc(1, sizeof(SMqPollReq)); if (pReq == NULL) { @@ -1371,6 +1463,24 @@ SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper) { return pRspObj; } +SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) { + SMqTaosxRspObj* pRspObj = taosMemoryCalloc(1, sizeof(SMqTaosxRspObj)); + pRspObj->resType = RES_TYPE__TAOSX; + tstrncpy(pRspObj->topic, pWrapper->topicHandle->topicName, TSDB_TOPIC_FNAME_LEN); + tstrncpy(pRspObj->db, pWrapper->topicHandle->db, TSDB_DB_FNAME_LEN); + pRspObj->vgId = pWrapper->vgHandle->vgId; + pRspObj->resIter = -1; + memcpy(&pRspObj->rsp, &pWrapper->dataRsp, sizeof(SMqTaosxRspObj)); + + pRspObj->resInfo.totalRows = 0; + pRspObj->resInfo.precision = TSDB_TIME_PRECISION_MILLI; + if (!pWrapper->dataRsp.withSchema) { + setResSchemaInfo(&pRspObj->resInfo, pWrapper->topicHandle->schema.pSchema, pWrapper->topicHandle->schema.nCols); + } + + return pRspObj; +} + int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { /*tscDebug("call poll");*/ for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) { @@ -1406,11 +1516,12 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { tsem_post(&tmq->rspSem); return -1; } - pParam->tmq = tmq; + pParam->refId = tmq->refId; + pParam->epoch = tmq->epoch; + pParam->pVg = pVg; pParam->pTopic = pTopic; pParam->vgId = pVg->vgId; - pParam->epoch = tmq->epoch; SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); if (sendInfo == NULL) { @@ -1511,8 +1622,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { SMqClientVg* pVg = pollRspWrapper->vgHandle; /*printf("vgId:%d, offset %" PRId64 " up to %" PRId64 "\n", pVg->vgId, pVg->currentOffset, * rspMsg->msg.rspOffset);*/ - pVg->currentOffset.version = pollRspWrapper->metaRsp.rspOffset; - pVg->currentOffset.type = TMQ_OFFSET__LOG; + pVg->currentOffset = pollRspWrapper->metaRsp.rspOffset; atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE); // build rsp SMqMetaRspObj* pRsp = tmqBuildMetaRspFromWrapper(pollRspWrapper); @@ -1523,6 +1633,30 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { pollRspWrapper->metaRsp.head.epoch, consumerEpoch); taosFreeQitem(pollRspWrapper); } + } else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__TAOSX_RSP) { + SMqPollRspWrapper* pollRspWrapper = (SMqPollRspWrapper*)rspWrapper; + /*atomic_sub_fetch_32(&tmq->readyRequest, 1);*/ + int32_t consumerEpoch = atomic_load_32(&tmq->epoch); + if (pollRspWrapper->taosxRsp.head.epoch == consumerEpoch) { + SMqClientVg* pVg = pollRspWrapper->vgHandle; + /*printf("vgId:%d, offset %" PRId64 " up to %" PRId64 "\n", pVg->vgId, pVg->currentOffset, + * rspMsg->msg.rspOffset);*/ + pVg->currentOffset = pollRspWrapper->taosxRsp.rspOffset; + atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE); + if (pollRspWrapper->taosxRsp.blockNum == 0) { + taosFreeQitem(pollRspWrapper); + rspWrapper = NULL; + continue; + } + // build rsp + SMqRspObj* pRsp = tmqBuildRspFromWrapper(pollRspWrapper); + taosFreeQitem(pollRspWrapper); + return pRsp; + } else { + tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d\n", + pollRspWrapper->taosxRsp.head.epoch, consumerEpoch); + taosFreeQitem(pollRspWrapper); + } } else { /*printf("handle ep rsp %d\n", rspMsg->head.mqMsgType);*/ bool reset = false; @@ -1550,7 +1684,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { } #endif - // in no topic status also need process delayed task + // in no topic status, delayed task also need to be processed if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__INIT) { return NULL; } @@ -1615,7 +1749,7 @@ int32_t tmq_consumer_close(tmq_t* tmq) { /*return rsp;*/ return 0; } - // TODO: free resources + taosRemoveRef(tmqMgmt.rsetId, tmq->refId); return 0; } @@ -1635,9 +1769,11 @@ tmq_res_t tmq_get_res_type(TAOS_RES* res) { } else if (TD_RES_TMQ_META(res)) { SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res; if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DELETE) { - return TMQ_RES_DATA; + return TMQ_RES_TAOSX; } return TMQ_RES_TABLE_META; + } else if (TD_RES_TMQ_TAOSX(res)) { + return TMQ_RES_TAOSX; } else { return TMQ_RES_INVALID; } @@ -1691,1610 +1827,6 @@ const char* tmq_get_table_name(TAOS_RES* res) { return NULL; } -static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id, - int8_t t) { - char* string = NULL; - cJSON* json = cJSON_CreateObject(); - if (json == NULL) { - return string; - } - cJSON* type = cJSON_CreateString("create"); - cJSON_AddItemToObject(json, "type", type); - - // char uid[32] = {0}; - // sprintf(uid, "%"PRIi64, id); - // cJSON* id_ = cJSON_CreateString(uid); - // cJSON_AddItemToObject(json, "id", id_); - cJSON* tableName = cJSON_CreateString(name); - cJSON_AddItemToObject(json, "tableName", tableName); - cJSON* tableType = cJSON_CreateString(t == TSDB_NORMAL_TABLE ? "normal" : "super"); - cJSON_AddItemToObject(json, "tableType", tableType); - // cJSON* version = cJSON_CreateNumber(1); - // cJSON_AddItemToObject(json, "version", version); - - cJSON* columns = cJSON_CreateArray(); - for (int i = 0; i < schemaRow->nCols; i++) { - cJSON* column = cJSON_CreateObject(); - SSchema* s = schemaRow->pSchema + i; - cJSON* cname = cJSON_CreateString(s->name); - cJSON_AddItemToObject(column, "name", cname); - cJSON* ctype = cJSON_CreateNumber(s->type); - cJSON_AddItemToObject(column, "type", ctype); - if (s->type == TSDB_DATA_TYPE_BINARY) { - int32_t length = s->bytes - VARSTR_HEADER_SIZE; - cJSON* cbytes = cJSON_CreateNumber(length); - cJSON_AddItemToObject(column, "length", cbytes); - } else if (s->type == TSDB_DATA_TYPE_NCHAR) { - int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; - cJSON* cbytes = cJSON_CreateNumber(length); - cJSON_AddItemToObject(column, "length", cbytes); - } - cJSON_AddItemToArray(columns, column); - } - cJSON_AddItemToObject(json, "columns", columns); - - cJSON* tags = cJSON_CreateArray(); - for (int i = 0; schemaTag && i < schemaTag->nCols; i++) { - cJSON* tag = cJSON_CreateObject(); - SSchema* s = schemaTag->pSchema + i; - cJSON* tname = cJSON_CreateString(s->name); - cJSON_AddItemToObject(tag, "name", tname); - cJSON* ttype = cJSON_CreateNumber(s->type); - cJSON_AddItemToObject(tag, "type", ttype); - if (s->type == TSDB_DATA_TYPE_BINARY) { - int32_t length = s->bytes - VARSTR_HEADER_SIZE; - cJSON* cbytes = cJSON_CreateNumber(length); - cJSON_AddItemToObject(tag, "length", cbytes); - } else if (s->type == TSDB_DATA_TYPE_NCHAR) { - int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; - cJSON* cbytes = cJSON_CreateNumber(length); - cJSON_AddItemToObject(tag, "length", cbytes); - } - cJSON_AddItemToArray(tags, tag); - } - cJSON_AddItemToObject(json, "tags", tags); - - string = cJSON_PrintUnformatted(json); - cJSON_Delete(json); - return string; -} - -static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) { - SMAlterStbReq req = {0}; - cJSON* json = NULL; - char* string = NULL; - - if (tDeserializeSMAlterStbReq(alterData, alterDataLen, &req) != 0) { - goto end; - } - - json = cJSON_CreateObject(); - if (json == NULL) { - goto end; - } - cJSON* type = cJSON_CreateString("alter"); - cJSON_AddItemToObject(json, "type", type); - // cJSON* uid = cJSON_CreateNumber(id); - // cJSON_AddItemToObject(json, "uid", uid); - SName name = {0}; - tNameFromString(&name, req.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); - cJSON* tableName = cJSON_CreateString(name.tname); - cJSON_AddItemToObject(json, "tableName", tableName); - cJSON* tableType = cJSON_CreateString("super"); - cJSON_AddItemToObject(json, "tableType", tableType); - - cJSON* alterType = cJSON_CreateNumber(req.alterType); - cJSON_AddItemToObject(json, "alterType", alterType); - switch (req.alterType) { - case TSDB_ALTER_TABLE_ADD_TAG: - case TSDB_ALTER_TABLE_ADD_COLUMN: { - TAOS_FIELD* field = taosArrayGet(req.pFields, 0); - cJSON* colName = cJSON_CreateString(field->name); - cJSON_AddItemToObject(json, "colName", colName); - cJSON* colType = cJSON_CreateNumber(field->type); - cJSON_AddItemToObject(json, "colType", colType); - - if (field->type == TSDB_DATA_TYPE_BINARY) { - int32_t length = field->bytes - VARSTR_HEADER_SIZE; - cJSON* cbytes = cJSON_CreateNumber(length); - cJSON_AddItemToObject(json, "colLength", cbytes); - } else if (field->type == TSDB_DATA_TYPE_NCHAR) { - int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; - cJSON* cbytes = cJSON_CreateNumber(length); - cJSON_AddItemToObject(json, "colLength", cbytes); - } - break; - } - case TSDB_ALTER_TABLE_DROP_TAG: - case TSDB_ALTER_TABLE_DROP_COLUMN: { - TAOS_FIELD* field = taosArrayGet(req.pFields, 0); - cJSON* colName = cJSON_CreateString(field->name); - cJSON_AddItemToObject(json, "colName", colName); - break; - } - case TSDB_ALTER_TABLE_UPDATE_TAG_BYTES: - case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: { - TAOS_FIELD* field = taosArrayGet(req.pFields, 0); - cJSON* colName = cJSON_CreateString(field->name); - cJSON_AddItemToObject(json, "colName", colName); - cJSON* colType = cJSON_CreateNumber(field->type); - cJSON_AddItemToObject(json, "colType", colType); - if (field->type == TSDB_DATA_TYPE_BINARY) { - int32_t length = field->bytes - VARSTR_HEADER_SIZE; - cJSON* cbytes = cJSON_CreateNumber(length); - cJSON_AddItemToObject(json, "colLength", cbytes); - } else if (field->type == TSDB_DATA_TYPE_NCHAR) { - int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; - cJSON* cbytes = cJSON_CreateNumber(length); - cJSON_AddItemToObject(json, "colLength", cbytes); - } - break; - } - case TSDB_ALTER_TABLE_UPDATE_TAG_NAME: - case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: { - TAOS_FIELD* oldField = taosArrayGet(req.pFields, 0); - TAOS_FIELD* newField = taosArrayGet(req.pFields, 1); - cJSON* colName = cJSON_CreateString(oldField->name); - cJSON_AddItemToObject(json, "colName", colName); - cJSON* colNewName = cJSON_CreateString(newField->name); - cJSON_AddItemToObject(json, "colNewName", colNewName); - break; - } - default: - break; - } - string = cJSON_PrintUnformatted(json); - -end: - cJSON_Delete(json); - tFreeSMAltertbReq(&req); - return string; -} - -static char* processCreateStb(SMqMetaRsp* metaRsp) { - SVCreateStbReq req = {0}; - SDecoder coder; - char* string = NULL; - - // decode and process req - void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead)); - int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead); - tDecoderInit(&coder, data, len); - - if (tDecodeSVCreateStbReq(&coder, &req) < 0) { - goto _err; - } - string = buildCreateTableJson(&req.schemaRow, &req.schemaTag, req.name, req.suid, TSDB_SUPER_TABLE); - tDecoderClear(&coder); - return string; - -_err: - tDecoderClear(&coder); - return string; -} - -static char* processAlterStb(SMqMetaRsp* metaRsp) { - SVCreateStbReq req = {0}; - SDecoder coder; - char* string = NULL; - - // decode and process req - void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead)); - int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead); - tDecoderInit(&coder, data, len); - - if (tDecodeSVCreateStbReq(&coder, &req) < 0) { - goto _err; - } - string = buildAlterSTableJson(req.alterOriData, req.alterOriDataLen); - tDecoderClear(&coder); - return string; - -_err: - tDecoderClear(&coder); - return string; -} - -static char* buildCreateCTableJson(STag* pTag, char* sname, char* name, SArray* tagName, int64_t id, uint8_t tagNum) { - char* string = NULL; - SArray* pTagVals = NULL; - cJSON* json = cJSON_CreateObject(); - if (json == NULL) { - return string; - } - cJSON* type = cJSON_CreateString("create"); - cJSON_AddItemToObject(json, "type", type); - // char cid[32] = {0}; - // sprintf(cid, "%"PRIi64, id); - // cJSON* cid_ = cJSON_CreateString(cid); - // cJSON_AddItemToObject(json, "id", cid_); - - cJSON* tableName = cJSON_CreateString(name); - cJSON_AddItemToObject(json, "tableName", tableName); - cJSON* tableType = cJSON_CreateString("child"); - cJSON_AddItemToObject(json, "tableType", tableType); - cJSON* using = cJSON_CreateString(sname); - cJSON_AddItemToObject(json, "using", using); - cJSON* tagNumJson = cJSON_CreateNumber(tagNum); - cJSON_AddItemToObject(json, "tagNum", tagNumJson); - // cJSON* version = cJSON_CreateNumber(1); - // cJSON_AddItemToObject(json, "version", version); - - cJSON* tags = cJSON_CreateArray(); - int32_t code = tTagToValArray(pTag, &pTagVals); - if (code) { - goto end; - } - - if (tTagIsJson(pTag)) { - STag* p = (STag*)pTag; - if (p->nTag == 0) { - goto end; - } - char* pJson = parseTagDatatoJson(pTag); - cJSON* tag = cJSON_CreateObject(); - STagVal* pTagVal = taosArrayGet(pTagVals, 0); - - char* ptname = taosArrayGet(tagName, 0); - cJSON* tname = cJSON_CreateString(ptname); - cJSON_AddItemToObject(tag, "name", tname); - // cJSON* cid_ = cJSON_CreateString(""); - // cJSON_AddItemToObject(tag, "cid", cid_); - cJSON* ttype = cJSON_CreateNumber(TSDB_DATA_TYPE_JSON); - cJSON_AddItemToObject(tag, "type", ttype); - cJSON* tvalue = cJSON_CreateString(pJson); - cJSON_AddItemToObject(tag, "value", tvalue); - cJSON_AddItemToArray(tags, tag); - taosMemoryFree(pJson); - goto end; - } - - for (int i = 0; i < taosArrayGetSize(pTagVals); i++) { - STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, i); - - cJSON* tag = cJSON_CreateObject(); - - char* ptname = taosArrayGet(tagName, i); - cJSON* tname = cJSON_CreateString(ptname); - cJSON_AddItemToObject(tag, "name", tname); - // cJSON* cid = cJSON_CreateNumber(pTagVal->cid); - // cJSON_AddItemToObject(tag, "cid", cid); - cJSON* ttype = cJSON_CreateNumber(pTagVal->type); - cJSON_AddItemToObject(tag, "type", ttype); - - cJSON* tvalue = NULL; - if (IS_VAR_DATA_TYPE(pTagVal->type)) { - char* buf = taosMemoryCalloc(pTagVal->nData + 3, 1); - if (!buf) goto end; - dataConverToStr(buf, pTagVal->type, pTagVal->pData, pTagVal->nData, NULL); - tvalue = cJSON_CreateString(buf); - taosMemoryFree(buf); - } else { - double val = 0; - GET_TYPED_DATA(val, double, pTagVal->type, &pTagVal->i64); - tvalue = cJSON_CreateNumber(val); - } - - cJSON_AddItemToObject(tag, "value", tvalue); - cJSON_AddItemToArray(tags, tag); - } - -end: - cJSON_AddItemToObject(json, "tags", tags); - string = cJSON_PrintUnformatted(json); - cJSON_Delete(json); - taosArrayDestroy(pTagVals); - return string; -} - -static char* processCreateTable(SMqMetaRsp* metaRsp) { - SDecoder decoder = {0}; - SVCreateTbBatchReq req = {0}; - SVCreateTbReq* pCreateReq; - char* string = NULL; - // decode - void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead)); - int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead); - tDecoderInit(&decoder, data, len); - if (tDecodeSVCreateTbBatchReq(&decoder, &req) < 0) { - goto _exit; - } - - // loop to create table - for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { - pCreateReq = req.pReqs + iReq; - if (pCreateReq->type == TSDB_CHILD_TABLE) { - string = buildCreateCTableJson((STag*)pCreateReq->ctb.pTag, pCreateReq->ctb.name, pCreateReq->name, - pCreateReq->ctb.tagName, pCreateReq->uid, pCreateReq->ctb.tagNum); - } else if (pCreateReq->type == TSDB_NORMAL_TABLE) { - string = - buildCreateTableJson(&pCreateReq->ntb.schemaRow, NULL, pCreateReq->name, pCreateReq->uid, TSDB_NORMAL_TABLE); - } - } - - tDecoderClear(&decoder); - -_exit: - tDecoderClear(&decoder); - return string; -} - -static char* processAlterTable(SMqMetaRsp* metaRsp) { - SDecoder decoder = {0}; - SVAlterTbReq vAlterTbReq = {0}; - char* string = NULL; - - // decode - void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead)); - int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead); - tDecoderInit(&decoder, data, len); - if (tDecodeSVAlterTbReq(&decoder, &vAlterTbReq) < 0) { - goto _exit; - } - - cJSON* json = cJSON_CreateObject(); - if (json == NULL) { - goto _exit; - } - cJSON* type = cJSON_CreateString("alter"); - cJSON_AddItemToObject(json, "type", type); - // cJSON* uid = cJSON_CreateNumber(id); - // cJSON_AddItemToObject(json, "uid", uid); - cJSON* tableName = cJSON_CreateString(vAlterTbReq.tbName); - cJSON_AddItemToObject(json, "tableName", tableName); - cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL ? "child" : "normal"); - cJSON_AddItemToObject(json, "tableType", tableType); - cJSON* alterType = cJSON_CreateNumber(vAlterTbReq.action); - cJSON_AddItemToObject(json, "alterType", alterType); - - switch (vAlterTbReq.action) { - case TSDB_ALTER_TABLE_ADD_COLUMN: { - cJSON* colName = cJSON_CreateString(vAlterTbReq.colName); - cJSON_AddItemToObject(json, "colName", colName); - cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type); - cJSON_AddItemToObject(json, "colType", colType); - - if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY) { - int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE; - cJSON* cbytes = cJSON_CreateNumber(length); - cJSON_AddItemToObject(json, "colLength", cbytes); - } else if (vAlterTbReq.type == TSDB_DATA_TYPE_NCHAR) { - int32_t length = (vAlterTbReq.bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; - cJSON* cbytes = cJSON_CreateNumber(length); - cJSON_AddItemToObject(json, "colLength", cbytes); - } - break; - } - case TSDB_ALTER_TABLE_DROP_COLUMN: { - cJSON* colName = cJSON_CreateString(vAlterTbReq.colName); - cJSON_AddItemToObject(json, "colName", colName); - break; - } - case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: { - cJSON* colName = cJSON_CreateString(vAlterTbReq.colName); - cJSON_AddItemToObject(json, "colName", colName); - cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType); - cJSON_AddItemToObject(json, "colType", colType); - if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY) { - int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE; - cJSON* cbytes = cJSON_CreateNumber(length); - cJSON_AddItemToObject(json, "colLength", cbytes); - } else if (vAlterTbReq.colModType == TSDB_DATA_TYPE_NCHAR) { - int32_t length = (vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; - cJSON* cbytes = cJSON_CreateNumber(length); - cJSON_AddItemToObject(json, "colLength", cbytes); - } - break; - } - case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: { - cJSON* colName = cJSON_CreateString(vAlterTbReq.colName); - cJSON_AddItemToObject(json, "colName", colName); - cJSON* colNewName = cJSON_CreateString(vAlterTbReq.colNewName); - cJSON_AddItemToObject(json, "colNewName", colNewName); - break; - } - case TSDB_ALTER_TABLE_UPDATE_TAG_VAL: { - cJSON* tagName = cJSON_CreateString(vAlterTbReq.tagName); - cJSON_AddItemToObject(json, "colName", tagName); - - bool isNull = vAlterTbReq.isNull; - if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) { - STag* jsonTag = (STag*)vAlterTbReq.pTagVal; - if (jsonTag->nTag == 0) isNull = true; - } - if (!isNull) { - char* buf = NULL; - - if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) { - ASSERT(tTagIsJson(vAlterTbReq.pTagVal) == true); - buf = parseTagDatatoJson(vAlterTbReq.pTagVal); - } else { - buf = taosMemoryCalloc(vAlterTbReq.nTagVal + 1, 1); - dataConverToStr(buf, vAlterTbReq.tagType, vAlterTbReq.pTagVal, vAlterTbReq.nTagVal, NULL); - } - - cJSON* colValue = cJSON_CreateString(buf); - cJSON_AddItemToObject(json, "colValue", colValue); - taosMemoryFree(buf); - } - - cJSON* isNullCJson = cJSON_CreateBool(isNull); - cJSON_AddItemToObject(json, "colValueNull", isNullCJson); - break; - } - default: - break; - } - string = cJSON_PrintUnformatted(json); - -_exit: - tDecoderClear(&decoder); - return string; -} - -static char* processDropSTable(SMqMetaRsp* metaRsp) { - SDecoder decoder = {0}; - SVDropStbReq req = {0}; - char* string = NULL; - - // decode - void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead)); - int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead); - tDecoderInit(&decoder, data, len); - if (tDecodeSVDropStbReq(&decoder, &req) < 0) { - goto _exit; - } - - cJSON* json = cJSON_CreateObject(); - if (json == NULL) { - goto _exit; - } - cJSON* type = cJSON_CreateString("drop"); - cJSON_AddItemToObject(json, "type", type); - cJSON* tableName = cJSON_CreateString(req.name); - cJSON_AddItemToObject(json, "tableName", tableName); - cJSON* tableType = cJSON_CreateString("super"); - cJSON_AddItemToObject(json, "tableType", tableType); - - string = cJSON_PrintUnformatted(json); - -_exit: - tDecoderClear(&decoder); - return string; -} - -static char* processDropTable(SMqMetaRsp* metaRsp) { - SDecoder decoder = {0}; - SVDropTbBatchReq req = {0}; - char* string = NULL; - - // decode - void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead)); - int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead); - tDecoderInit(&decoder, data, len); - if (tDecodeSVDropTbBatchReq(&decoder, &req) < 0) { - goto _exit; - } - - cJSON* json = cJSON_CreateObject(); - if (json == NULL) { - goto _exit; - } - cJSON* type = cJSON_CreateString("drop"); - cJSON_AddItemToObject(json, "type", type); - // cJSON* uid = cJSON_CreateNumber(id); - // cJSON_AddItemToObject(json, "uid", uid); - // cJSON* tableType = cJSON_CreateString("normal"); - // cJSON_AddItemToObject(json, "tableType", tableType); - - cJSON* tableNameList = cJSON_CreateArray(); - for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { - SVDropTbReq* pDropTbReq = req.pReqs + iReq; - - cJSON* tableName = cJSON_CreateString(pDropTbReq->name); - cJSON_AddItemToArray(tableNameList, tableName); - } - cJSON_AddItemToObject(json, "tableNameList", tableNameList); - - string = cJSON_PrintUnformatted(json); - -_exit: - tDecoderClear(&decoder); - return string; -} - -static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) { - SVCreateStbReq req = {0}; - SDecoder coder; - SMCreateStbReq pReq = {0}; - int32_t code = TSDB_CODE_SUCCESS; - SRequestObj* pRequest = NULL; - - code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - - if (!pRequest->pDb) { - code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; - goto end; - } - // decode and process req - void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); - int32_t len = metaLen - sizeof(SMsgHead); - tDecoderInit(&coder, data, len); - if (tDecodeSVCreateStbReq(&coder, &req) < 0) { - code = TSDB_CODE_INVALID_PARA; - goto end; - } - // build create stable - pReq.pColumns = taosArrayInit(req.schemaRow.nCols, sizeof(SField)); - for (int32_t i = 0; i < req.schemaRow.nCols; i++) { - SSchema* pSchema = req.schemaRow.pSchema + i; - SField field = {.type = pSchema->type, .bytes = pSchema->bytes}; - strcpy(field.name, pSchema->name); - taosArrayPush(pReq.pColumns, &field); - } - pReq.pTags = taosArrayInit(req.schemaTag.nCols, sizeof(SField)); - for (int32_t i = 0; i < req.schemaTag.nCols; i++) { - SSchema* pSchema = req.schemaTag.pSchema + i; - SField field = {.type = pSchema->type, .bytes = pSchema->bytes}; - strcpy(field.name, pSchema->name); - taosArrayPush(pReq.pTags, &field); - } - - pReq.colVer = req.schemaRow.version; - pReq.tagVer = req.schemaTag.version; - pReq.numOfColumns = req.schemaRow.nCols; - pReq.numOfTags = req.schemaTag.nCols; - pReq.commentLen = -1; - pReq.suid = req.suid; - pReq.source = TD_REQ_FROM_TAOX; - pReq.igExists = true; - - STscObj* pTscObj = pRequest->pTscObj; - SName tableName; - tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name); - - SCmdMsgInfo pCmdMsg = {0}; - pCmdMsg.epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp); - pCmdMsg.msgType = TDMT_MND_CREATE_STB; - pCmdMsg.msgLen = tSerializeSMCreateStbReq(NULL, 0, &pReq); - pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen); - if (NULL == pCmdMsg.pMsg) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - tSerializeSMCreateStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq); - - SQuery pQuery = {0}; - pQuery.execMode = QUERY_EXEC_MODE_RPC; - pQuery.pCmdMsg = &pCmdMsg; - pQuery.msgType = pQuery.pCmdMsg->msgType; - pQuery.stableQuery = true; - - launchQueryImpl(pRequest, &pQuery, true, NULL); - - if (pRequest->code == TSDB_CODE_SUCCESS) { - SCatalog* pCatalog = NULL; - catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); - catalogRemoveTableMeta(pCatalog, &tableName); - } - - code = pRequest->code; - taosMemoryFree(pCmdMsg.pMsg); - -end: - destroyRequest(pRequest); - tFreeSMCreateStbReq(&pReq); - tDecoderClear(&coder); - return code; -} - -static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) { - SVDropStbReq req = {0}; - SDecoder coder; - SMDropStbReq pReq = {0}; - int32_t code = TSDB_CODE_SUCCESS; - SRequestObj* pRequest = NULL; - - code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - - if (!pRequest->pDb) { - code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; - goto end; - } - // decode and process req - void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); - int32_t len = metaLen - sizeof(SMsgHead); - tDecoderInit(&coder, data, len); - if (tDecodeSVDropStbReq(&coder, &req) < 0) { - code = TSDB_CODE_INVALID_PARA; - goto end; - } - - // build drop stable - pReq.igNotExists = true; - pReq.source = TD_REQ_FROM_TAOX; - pReq.suid = req.suid; - - STscObj* pTscObj = pRequest->pTscObj; - SName tableName = {0}; - tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name); - - SCmdMsgInfo pCmdMsg = {0}; - pCmdMsg.epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp); - pCmdMsg.msgType = TDMT_MND_DROP_STB; - pCmdMsg.msgLen = tSerializeSMDropStbReq(NULL, 0, &pReq); - pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen); - if (NULL == pCmdMsg.pMsg) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - tSerializeSMDropStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq); - - SQuery pQuery = {0}; - pQuery.execMode = QUERY_EXEC_MODE_RPC; - pQuery.pCmdMsg = &pCmdMsg; - pQuery.msgType = pQuery.pCmdMsg->msgType; - pQuery.stableQuery = true; - - launchQueryImpl(pRequest, &pQuery, true, NULL); - - if (pRequest->code == TSDB_CODE_SUCCESS) { - SCatalog* pCatalog = NULL; - catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); - catalogRemoveTableMeta(pCatalog, &tableName); - } - - code = pRequest->code; - taosMemoryFree(pCmdMsg.pMsg); - -end: - destroyRequest(pRequest); - tDecoderClear(&coder); - return code; -} - -typedef struct SVgroupCreateTableBatch { - SVCreateTbBatchReq req; - SVgroupInfo info; - char dbName[TSDB_DB_NAME_LEN]; -} SVgroupCreateTableBatch; - -static void destroyCreateTbReqBatch(void* data) { - SVgroupCreateTableBatch* pTbBatch = (SVgroupCreateTableBatch*)data; - taosArrayDestroy(pTbBatch->req.pArray); -} - -static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { - SVCreateTbBatchReq req = {0}; - SDecoder coder = {0}; - int32_t code = TSDB_CODE_SUCCESS; - SRequestObj* pRequest = NULL; - SQuery* pQuery = NULL; - SHashObj* pVgroupHashmap = NULL; - - code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - - if (!pRequest->pDb) { - code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; - goto end; - } - // decode and process req - void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); - int32_t len = metaLen - sizeof(SMsgHead); - tDecoderInit(&coder, data, len); - if (tDecodeSVCreateTbBatchReq(&coder, &req) < 0) { - code = TSDB_CODE_INVALID_PARA; - goto end; - } - - STscObj* pTscObj = pRequest->pTscObj; - - SVCreateTbReq* pCreateReq = NULL; - SCatalog* pCatalog = NULL; - code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - - pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); - if (NULL == pVgroupHashmap) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - taosHashSetFreeFp(pVgroupHashmap, destroyCreateTbReqBatch); - - SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; - - pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName)); - // loop to create table - for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { - pCreateReq = req.pReqs + iReq; - - SVgroupInfo pInfo = {0}; - SName pName = {0}; - toName(pTscObj->acctId, pRequest->pDb, pCreateReq->name, &pName); - code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - taosArrayPush(pRequest->tableList, &pName); - - SVgroupCreateTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId)); - if (pTableBatch == NULL) { - SVgroupCreateTableBatch tBatch = {0}; - tBatch.info = pInfo; - strcpy(tBatch.dbName, pRequest->pDb); - - tBatch.req.pArray = taosArrayInit(4, sizeof(struct SVCreateTbReq)); - taosArrayPush(tBatch.req.pArray, pCreateReq); - - taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch)); - } else { // add to the correct vgroup - taosArrayPush(pTableBatch->req.pArray, pCreateReq); - } - } - - SArray* pBufArray = serializeVgroupsCreateTableBatch(pVgroupHashmap); - if (NULL == pBufArray) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - - pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY); - pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; - pQuery->msgType = TDMT_VND_CREATE_TABLE; - pQuery->stableQuery = false; - pQuery->pRoot = nodesMakeNode(QUERY_NODE_CREATE_TABLE_STMT); - - code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - - launchQueryImpl(pRequest, pQuery, true, NULL); - if (pRequest->code == TSDB_CODE_SUCCESS) { - removeMeta(pTscObj, pRequest->tableList); - } - - code = pRequest->code; - -end: - taosHashCleanup(pVgroupHashmap); - destroyRequest(pRequest); - tDecoderClear(&coder); - qDestroyQuery(pQuery); - return code; -} - -typedef struct SVgroupDropTableBatch { - SVDropTbBatchReq req; - SVgroupInfo info; - char dbName[TSDB_DB_NAME_LEN]; -} SVgroupDropTableBatch; - -static void destroyDropTbReqBatch(void* data) { - SVgroupDropTableBatch* pTbBatch = (SVgroupDropTableBatch*)data; - taosArrayDestroy(pTbBatch->req.pArray); -} - -static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { - SVDropTbBatchReq req = {0}; - SDecoder coder = {0}; - int32_t code = TSDB_CODE_SUCCESS; - SRequestObj* pRequest = NULL; - SQuery* pQuery = NULL; - SHashObj* pVgroupHashmap = NULL; - - code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - - if (!pRequest->pDb) { - code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; - goto end; - } - // decode and process req - void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); - int32_t len = metaLen - sizeof(SMsgHead); - tDecoderInit(&coder, data, len); - if (tDecodeSVDropTbBatchReq(&coder, &req) < 0) { - code = TSDB_CODE_INVALID_PARA; - goto end; - } - - STscObj* pTscObj = pRequest->pTscObj; - - SVDropTbReq* pDropReq = NULL; - SCatalog* pCatalog = NULL; - code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - - pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); - if (NULL == pVgroupHashmap) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - taosHashSetFreeFp(pVgroupHashmap, destroyDropTbReqBatch); - - SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; - pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName)); - // loop to create table - for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { - pDropReq = req.pReqs + iReq; - pDropReq->igNotExists = true; - - SVgroupInfo pInfo = {0}; - SName pName = {0}; - toName(pTscObj->acctId, pRequest->pDb, pDropReq->name, &pName); - code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - - taosArrayPush(pRequest->tableList, &pName); - SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId)); - if (pTableBatch == NULL) { - SVgroupDropTableBatch tBatch = {0}; - tBatch.info = pInfo; - tBatch.req.pArray = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SVDropTbReq)); - taosArrayPush(tBatch.req.pArray, pDropReq); - - taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch)); - } else { // add to the correct vgroup - taosArrayPush(pTableBatch->req.pArray, pDropReq); - } - } - - SArray* pBufArray = serializeVgroupsDropTableBatch(pVgroupHashmap); - if (NULL == pBufArray) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - - pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY); - pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; - pQuery->msgType = TDMT_VND_DROP_TABLE; - pQuery->stableQuery = false; - pQuery->pRoot = nodesMakeNode(QUERY_NODE_DROP_TABLE_STMT); - - code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - - launchQueryImpl(pRequest, pQuery, true, NULL); - if (pRequest->code == TSDB_CODE_SUCCESS) { - removeMeta(pTscObj, pRequest->tableList); - } - code = pRequest->code; - -end: - taosHashCleanup(pVgroupHashmap); - destroyRequest(pRequest); - tDecoderClear(&coder); - qDestroyQuery(pQuery); - return code; -} - -// delete from db.tabl where .. -> delete from tabl where .. -// delete from db .tabl where .. -> delete from tabl where .. -// static void getTbName(char *sql){ -// char *ch = sql; -// -// bool inBackQuote = false; -// int8_t dotIndex = 0; -// while(*ch != '\0'){ -// if(!inBackQuote && *ch == '`'){ -// inBackQuote = true; -// ch++; -// continue; -// } -// -// if(inBackQuote && *ch == '`'){ -// inBackQuote = false; -// ch++; -// -// continue; -// } -// -// if(!inBackQuote && *ch == '.'){ -// dotIndex ++; -// if(dotIndex == 2){ -// memmove(sql, ch + 1, strlen(ch + 1) + 1); -// break; -// } -// } -// ch++; -// } -//} - -static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) { - SDeleteRes req = {0}; - SDecoder coder = {0}; - int32_t code = TSDB_CODE_SUCCESS; - - // decode and process req - void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); - int32_t len = metaLen - sizeof(SMsgHead); - tDecoderInit(&coder, data, len); - if (tDecodeDeleteRes(&coder, &req) < 0) { - code = TSDB_CODE_INVALID_PARA; - goto end; - } - - // getTbName(req.tableFName); - char sql[256] = {0}; - sprintf(sql, "delete from `%s` where `%s` >= %" PRId64 " and `%s` <= %" PRId64, req.tableFName, req.tsColName, - req.skey, req.tsColName, req.ekey); - printf("delete sql:%s\n", sql); - - TAOS_RES* res = taos_query(taos, sql); - SRequestObj* pRequest = (SRequestObj*)res; - code = pRequest->code; - if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) { - code = TSDB_CODE_SUCCESS; - } - taos_free_result(res); - -end: - tDecoderClear(&coder); - return code; -} - -static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { - SVAlterTbReq req = {0}; - SDecoder coder = {0}; - int32_t code = TSDB_CODE_SUCCESS; - SRequestObj* pRequest = NULL; - SQuery* pQuery = NULL; - SArray* pArray = NULL; - SVgDataBlocks* pVgData = NULL; - - code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest); - - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - - if (!pRequest->pDb) { - code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; - goto end; - } - // decode and process req - void* data = POINTER_SHIFT(meta, sizeof(SMsgHead)); - int32_t len = metaLen - sizeof(SMsgHead); - tDecoderInit(&coder, data, len); - if (tDecodeSVAlterTbReq(&coder, &req) < 0) { - code = TSDB_CODE_INVALID_PARA; - goto end; - } - - // do not deal TSDB_ALTER_TABLE_UPDATE_OPTIONS - if (req.action == TSDB_ALTER_TABLE_UPDATE_OPTIONS) { - goto end; - } - - STscObj* pTscObj = pRequest->pTscObj; - SCatalog* pCatalog = NULL; - code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - - SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; - - SVgroupInfo pInfo = {0}; - SName pName = {0}; - toName(pTscObj->acctId, pRequest->pDb, req.tbName, &pName); - code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - - pArray = taosArrayInit(1, sizeof(void*)); - if (NULL == pArray) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - - pVgData = taosMemoryCalloc(1, sizeof(SVgDataBlocks)); - if (NULL == pVgData) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - pVgData->vg = pInfo; - pVgData->pData = taosMemoryMalloc(metaLen); - if (NULL == pVgData->pData) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - memcpy(pVgData->pData, meta, metaLen); - ((SMsgHead*)pVgData->pData)->vgId = htonl(pInfo.vgId); - pVgData->size = metaLen; - pVgData->numOfTables = 1; - taosArrayPush(pArray, &pVgData); - - pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY); - pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; - pQuery->msgType = TDMT_VND_ALTER_TABLE; - pQuery->stableQuery = false; - pQuery->pRoot = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT); - - code = rewriteToVnodeModifyOpStmt(pQuery, pArray); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - - launchQueryImpl(pRequest, pQuery, true, NULL); - - pVgData = NULL; - pArray = NULL; - code = pRequest->code; - if (code == TSDB_CODE_VND_TABLE_NOT_EXIST) { - code = TSDB_CODE_SUCCESS; - } - - if (pRequest->code == TSDB_CODE_SUCCESS) { - SExecResult* pRes = &pRequest->body.resInfo.execRes; - if (pRes->res != NULL) { - code = handleAlterTbExecRes(pRes->res, pCatalog); - } - } -end: - taosArrayDestroy(pArray); - if (pVgData) taosMemoryFreeClear(pVgData->pData); - taosMemoryFreeClear(pVgData); - destroyRequest(pRequest); - tDecoderClear(&coder); - qDestroyQuery(pQuery); - return code; -} - -typedef struct { - SVgroupInfo vg; - void* data; -} VgData; - -static void destroyVgHash(void* data) { - VgData* vgData = (VgData*)data; - taosMemoryFreeClear(vgData->data); -} - -int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname) { - int32_t code = TSDB_CODE_SUCCESS; - STableMeta* pTableMeta = NULL; - SQuery* pQuery = NULL; - - SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT); - if (!pRequest) { - uError("WriteRaw:createRequest error request is null"); - code = terrno; - goto end; - } - - if (!pRequest->pDb) { - uError("WriteRaw:not use db"); - code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; - goto end; - } - - SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; - strcpy(pName.dbname, pRequest->pDb); - strcpy(pName.tname, tbname); - - struct SCatalog* pCatalog = NULL; - code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog); - if (code != TSDB_CODE_SUCCESS) { - uError("WriteRaw: get gatlog error"); - goto end; - } - - SRequestConnInfo conn = {0}; - conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter; - conn.requestId = pRequest->requestId; - conn.requestObjRefId = pRequest->self; - conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); - - SVgroupInfo vgData = {0}; - code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vgData); - if (code != TSDB_CODE_SUCCESS) { - uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbname); - goto end; - } - - code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta); - if (code != TSDB_CODE_SUCCESS) { - uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbname); - goto end; - } - uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid); - uint64_t uid = pTableMeta->uid; - int32_t numOfCols = pTableMeta->tableInfo.numOfColumns; - - uint16_t fLen = 0; - int32_t rowSize = 0; - int16_t nVar = 0; - for (int i = 0; i < numOfCols; i++) { - SSchema* schema = pTableMeta->schema + i; - fLen += TYPE_BYTES[schema->type]; - rowSize += schema->bytes; - if (IS_VAR_DATA_TYPE(schema->type)) { - nVar++; - } - } - - int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) + - (int32_t)TD_BITMAP_BYTES(numOfCols - 1); - int32_t schemaLen = 0; - int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize; - - int32_t totalLen = sizeof(SSubmitReq) + submitLen; - SSubmitReq* subReq = taosMemoryCalloc(1, totalLen); - SSubmitBlk* blk = POINTER_SHIFT(subReq, sizeof(SSubmitReq)); - void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk)); - STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen); - - SRowBuilder rb = {0}; - tdSRowInit(&rb, pTableMeta->sversion); - tdSRowSetTpInfo(&rb, numOfCols, fLen); - int32_t dataLen = 0; - - char* pStart = pData + getVersion1BlockMetaSize(pData, numOfCols); - int32_t* colLength = (int32_t*)pStart; - pStart += sizeof(int32_t) * numOfCols; - - SResultColumn* pCol = taosMemoryCalloc(numOfCols, sizeof(SResultColumn)); - - for (int32_t i = 0; i < numOfCols; ++i) { - if (IS_VAR_DATA_TYPE(pTableMeta->schema[i].type)) { - pCol[i].offset = (int32_t*)pStart; - pStart += rows * sizeof(int32_t); - } else { - pCol[i].nullbitmap = pStart; - pStart += BitmapLen(rows); - } - - pCol[i].pData = pStart; - pStart += colLength[i]; - } - - for (int32_t j = 0; j < rows; j++) { - tdSRowResetBuf(&rb, rowData); - int32_t offset = 0; - for (int32_t k = 0; k < numOfCols; k++) { - const SSchema* pColumn = &pTableMeta->schema[k]; - - if (IS_VAR_DATA_TYPE(pColumn->type)) { - if (pCol[k].offset[j] != -1) { - char* data = pCol[k].pData + pCol[k].offset[j]; - tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k); - } else { - - tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k); - } - } else { - if (!colDataIsNull_f(pCol[k].nullbitmap, j)) { - char* data = pCol[k].pData + pColumn->bytes * j; - tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k); - } else { - tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k); - } - } - - offset += TYPE_BYTES[pColumn->type]; - } - tdSRowEnd(&rb); - int32_t rowLen = TD_ROW_LEN(rowData); - rowData = POINTER_SHIFT(rowData, rowLen); - dataLen += rowLen; - } - - taosMemoryFree(pCol); - - blk->uid = htobe64(uid); - blk->suid = htobe64(suid); - blk->sversion = htonl(pTableMeta->sversion); - blk->schemaLen = htonl(schemaLen); - blk->numOfRows = htonl(rows); - blk->dataLen = htonl(dataLen); - subReq->length = sizeof(SSubmitReq) + sizeof(SSubmitBlk) + schemaLen + dataLen; - subReq->numOfBlocks = 1; - - pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY); - if (NULL == pQuery) { - uError("create SQuery error"); - code = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; - pQuery->haveResultSet = false; - pQuery->msgType = TDMT_VND_SUBMIT; - pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT); - if (NULL == pQuery->pRoot) { - uError("create pQuery->pRoot error"); - code = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot); - nodeStmt->payloadType = PAYLOAD_TYPE_KV; - nodeStmt->pDataBlocks = taosArrayInit(1, POINTER_BYTES); - - SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks)); - if (NULL == dst) { - code = TSDB_CODE_TSC_OUT_OF_MEMORY; - goto end; - } - dst->vg = vgData; - dst->numOfTables = subReq->numOfBlocks; - dst->size = subReq->length; - dst->pData = (char*)subReq; - subReq->header.vgId = htonl(dst->vg.vgId); - subReq->version = htonl(1); - subReq->header.contLen = htonl(subReq->length); - subReq->length = htonl(subReq->length); - subReq->numOfBlocks = htonl(subReq->numOfBlocks); - subReq = NULL; // no need free - taosArrayPush(nodeStmt->pDataBlocks, &dst); - - launchQueryImpl(pRequest, pQuery, true, NULL); - code = pRequest->code; - -end: - taosMemoryFreeClear(pTableMeta); - qDestroyQuery(pQuery); - return code; -} - -static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) { - int32_t code = TSDB_CODE_SUCCESS; - SHashObj* pVgHash = NULL; - SQuery* pQuery = NULL; - SMqRspObj rspObj = {0}; - SDecoder decoder = {0}; - - terrno = TSDB_CODE_SUCCESS; - SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT); - if (!pRequest) { - uError("WriteRaw:createRequest error request is null"); - return terrno; - } - - rspObj.resIter = -1; - rspObj.resType = RES_TYPE__TMQ; - - tDecoderInit(&decoder, data, dataLen); - code = tDecodeSMqDataRsp(&decoder, &rspObj.rsp); - if (code != 0) { - uError("WriteRaw:decode smqDataRsp error"); - code = TSDB_CODE_INVALID_MSG; - goto end; - } - - if (!pRequest->pDb) { - uError("WriteRaw:not use db"); - code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; - goto end; - } - - pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); - taosHashSetFreeFp(pVgHash, destroyVgHash); - struct SCatalog* pCatalog = NULL; - code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog); - if (code != TSDB_CODE_SUCCESS) { - uError("WriteRaw: get gatlog error"); - goto end; - } - - SRequestConnInfo conn = {0}; - conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter; - conn.requestId = pRequest->requestId; - conn.requestObjRefId = pRequest->self; - conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); - - printf("raw data block num:%d\n", rspObj.rsp.blockNum); - while (++rspObj.resIter < rspObj.rsp.blockNum) { - SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter); - if (!rspObj.rsp.withSchema) { - uError("WriteRaw:no schema, iter:%d", rspObj.resIter); - goto end; - } - SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.rsp.blockSchema, rspObj.resIter); - setResSchemaInfo(&rspObj.resInfo, pSW->pSchema, pSW->nCols); - - code = setQueryResultFromRsp(&rspObj.resInfo, pRetrieve, false, false); - if (code != TSDB_CODE_SUCCESS) { - uError("WriteRaw: setQueryResultFromRsp error"); - goto end; - } - - uint16_t fLen = 0; - int32_t rowSize = 0; - int16_t nVar = 0; - for (int i = 0; i < pSW->nCols; i++) { - SSchema* schema = pSW->pSchema + i; - fLen += TYPE_BYTES[schema->type]; - rowSize += schema->bytes; - if (IS_VAR_DATA_TYPE(schema->type)) { - nVar++; - } - } - - int32_t rows = rspObj.resInfo.numOfRows; - int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) + - (int32_t)TD_BITMAP_BYTES(pSW->nCols - 1); - int32_t schemaLen = 0; - int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize; - - const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter); - if (!tbName) { - uError("WriteRaw: tbname is null"); - code = TSDB_CODE_TMQ_INVALID_MSG; - goto end; - } - - printf("raw data tbname:%s\n", tbName); - SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; - strcpy(pName.dbname, pRequest->pDb); - strcpy(pName.tname, tbName); - - VgData vgData = {0}; - code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &(vgData.vg)); - if (code != TSDB_CODE_SUCCESS) { - uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName); - goto end; - } - - SSubmitReq* subReq = NULL; - SSubmitBlk* blk = NULL; - void* hData = taosHashGet(pVgHash, &vgData.vg.vgId, sizeof(vgData.vg.vgId)); - if (hData) { - vgData = *(VgData*)hData; - - int32_t totalLen = ((SSubmitReq*)(vgData.data))->length + submitLen; - void* tmp = taosMemoryRealloc(vgData.data, totalLen); - if (tmp == NULL) { - code = TSDB_CODE_TSC_OUT_OF_MEMORY; - goto end; - } - vgData.data = tmp; - ((VgData*)hData)->data = tmp; - subReq = (SSubmitReq*)(vgData.data); - blk = POINTER_SHIFT(vgData.data, subReq->length); - } else { - int32_t totalLen = sizeof(SSubmitReq) + submitLen; - void* tmp = taosMemoryCalloc(1, totalLen); - if (tmp == NULL) { - code = TSDB_CODE_TSC_OUT_OF_MEMORY; - goto end; - } - vgData.data = tmp; - taosHashPut(pVgHash, (const char*)&vgData.vg.vgId, sizeof(vgData.vg.vgId), (char*)&vgData, sizeof(vgData)); - subReq = (SSubmitReq*)(vgData.data); - subReq->length = sizeof(SSubmitReq); - subReq->numOfBlocks = 0; - - blk = POINTER_SHIFT(vgData.data, sizeof(SSubmitReq)); - } - - STableMeta* pTableMeta = NULL; - code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta); - if (code != TSDB_CODE_SUCCESS) { - uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbName); - goto end; - } - uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid); - uint64_t uid = pTableMeta->uid; - taosMemoryFreeClear(pTableMeta); - - void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk)); - STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen); - - SRowBuilder rb = {0}; - tdSRowInit(&rb, pSW->version); - tdSRowSetTpInfo(&rb, pSW->nCols, fLen); - int32_t dataLen = 0; - - for (int32_t j = 0; j < rows; j++) { - tdSRowResetBuf(&rb, rowData); - - doSetOneRowPtr(&rspObj.resInfo); - rspObj.resInfo.current += 1; - - int32_t offset = 0; - for (int32_t k = 0; k < pSW->nCols; k++) { - const SSchema* pColumn = &pSW->pSchema[k]; - char* data = rspObj.resInfo.row[k]; - if (!data) { - tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k); - } else { - if (IS_VAR_DATA_TYPE(pColumn->type)) { - data -= VARSTR_HEADER_SIZE; - } - tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k); - } - offset += TYPE_BYTES[pColumn->type]; - } - tdSRowEnd(&rb); - int32_t rowLen = TD_ROW_LEN(rowData); - rowData = POINTER_SHIFT(rowData, rowLen); - dataLen += rowLen; - } - - blk->uid = htobe64(uid); - blk->suid = htobe64(suid); - blk->sversion = htonl(pSW->version); - blk->schemaLen = htonl(schemaLen); - blk->numOfRows = htonl(rows); - blk->dataLen = htonl(dataLen); - subReq->length += sizeof(SSubmitBlk) + schemaLen + dataLen; - subReq->numOfBlocks++; - } - - pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY); - if (NULL == pQuery) { - uError("create SQuery error"); - code = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; - pQuery->haveResultSet = false; - pQuery->msgType = TDMT_VND_SUBMIT; - pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT); - if (NULL == pQuery->pRoot) { - uError("create pQuery->pRoot error"); - code = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot); - nodeStmt->payloadType = PAYLOAD_TYPE_KV; - - int32_t numOfVg = taosHashGetSize(pVgHash); - nodeStmt->pDataBlocks = taosArrayInit(numOfVg, POINTER_BYTES); - - VgData* vData = (VgData*)taosHashIterate(pVgHash, NULL); - while (vData) { - SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks)); - if (NULL == dst) { - code = TSDB_CODE_TSC_OUT_OF_MEMORY; - goto end; - } - dst->vg = vData->vg; - SSubmitReq* subReq = (SSubmitReq*)(vData->data); - dst->numOfTables = subReq->numOfBlocks; - dst->size = subReq->length; - dst->pData = (char*)subReq; - vData->data = NULL; // no need free - subReq->header.vgId = htonl(dst->vg.vgId); - subReq->version = htonl(1); - subReq->header.contLen = htonl(subReq->length); - subReq->length = htonl(subReq->length); - subReq->numOfBlocks = htonl(subReq->numOfBlocks); - taosArrayPush(nodeStmt->pDataBlocks, &dst); - vData = (VgData*)taosHashIterate(pVgHash, vData); - } - - launchQueryImpl(pRequest, pQuery, true, NULL); - code = pRequest->code; - -end: - tDecoderClear(&decoder); - taos_free_result(&rspObj); - qDestroyQuery(pQuery); - destroyRequest(pRequest); - taosHashCleanup(pVgHash); - return code; -} - -char* tmq_get_json_meta(TAOS_RES* res) { - if (!TD_RES_TMQ_META(res)) { - return NULL; - } - - SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res; - if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_STB) { - return processCreateStb(&pMetaRspObj->metaRsp); - } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_STB) { - return processAlterStb(&pMetaRspObj->metaRsp); - } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_STB) { - return processDropSTable(&pMetaRspObj->metaRsp); - } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_TABLE) { - return processCreateTable(&pMetaRspObj->metaRsp); - } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_TABLE) { - return processAlterTable(&pMetaRspObj->metaRsp); - } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_TABLE) { - return processDropTable(&pMetaRspObj->metaRsp); - } - return NULL; -} - -void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); } - -int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data* raw) { - if (!raw || !res) { - return TSDB_CODE_INVALID_PARA; - } - if (TD_RES_TMQ_META(res)) { - SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res; - raw->raw = pMetaRspObj->metaRsp.metaRsp; - raw->raw_len = pMetaRspObj->metaRsp.metaRspLen; - raw->raw_type = pMetaRspObj->metaRsp.resMsgType; - } else if (TD_RES_TMQ(res)) { - SMqRspObj* rspObj = ((SMqRspObj*)res); - - int32_t len = 0; - int32_t code = 0; - tEncodeSize(tEncodeSMqDataRsp, &rspObj->rsp, len, code); - if (code < 0) { - return -1; - } - - void* buf = taosMemoryCalloc(1, len); - SEncoder encoder = {0}; - tEncoderInit(&encoder, buf, len); - tEncodeSMqDataRsp(&encoder, &rspObj->rsp); - tEncoderClear(&encoder); - - raw->raw = buf; - raw->raw_len = len; - raw->raw_type = RES_TYPE__TMQ; - } else { - return TSDB_CODE_TMQ_INVALID_MSG; - } - return TSDB_CODE_SUCCESS; -} - -void tmq_free_raw(tmq_raw_data raw) { - if (raw.raw_type == RES_TYPE__TMQ) { - taosMemoryFree(raw.raw); - } -} - -int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) { - if (!taos) { - return TSDB_CODE_INVALID_PARA; - } - - if (raw.raw_type == TDMT_VND_CREATE_STB) { - return taosCreateStb(taos, raw.raw, raw.raw_len); - } else if (raw.raw_type == TDMT_VND_ALTER_STB) { - return taosCreateStb(taos, raw.raw, raw.raw_len); - } else if (raw.raw_type == TDMT_VND_DROP_STB) { - return taosDropStb(taos, raw.raw, raw.raw_len); - } else if (raw.raw_type == TDMT_VND_CREATE_TABLE) { - return taosCreateTable(taos, raw.raw, raw.raw_len); - } else if (raw.raw_type == TDMT_VND_ALTER_TABLE) { - return taosAlterTable(taos, raw.raw, raw.raw_len); - } else if (raw.raw_type == TDMT_VND_DROP_TABLE) { - return taosDropTable(taos, raw.raw, raw.raw_len); - } else if (raw.raw_type == TDMT_VND_DELETE) { - return taosDeleteData(taos, raw.raw, raw.raw_len); - } else if (raw.raw_type == RES_TYPE__TMQ) { - return tmqWriteRaw(taos, raw.raw, raw.raw_len); - } - return TSDB_CODE_INVALID_PARA; -} - void tmq_commit_async(tmq_t* tmq, const TAOS_RES* msg, tmq_commit_cb* cb, void* param) { // tmqCommitInner(tmq, msg, 0, 1, cb, param); diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp index ec270889e2b326ab6481005ece006278c686da78..4ea5443678d8b927f8df831874f32b20655cb7d0 100644 --- a/source/client/test/clientTests.cpp +++ b/source/client/test/clientTests.cpp @@ -123,7 +123,7 @@ void createNewTable(TAOS* pConn, int32_t index) { } taos_free_result(pRes); - for(int32_t i = 0; i < 100000; i += 20) { + for(int32_t i = 0; i < 3280; i += 20) { char sql[1024] = {0}; sprintf(sql, "insert into tu%d values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" @@ -679,30 +679,28 @@ TEST(testCase, projection_query_tables) { TAOS_RES* pRes = taos_query(pConn, "use abc1"); taos_free_result(pRes); - pRes = taos_query(pConn, "explain verbose true select _wstart,count(*),a from st1 partition by a interval(1s)"); - printResult(pRes); -// pRes = taos_query(pConn, "create stable st1 (ts timestamp, k int) tags(a int)"); -// if (taos_errno(pRes) != 0) { -// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes)); -// } -// taos_free_result(pRes); -// -// pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int) tags(a int)"); -// if (taos_errno(pRes) != 0) { -// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes)); -// } -// taos_free_result(pRes); -// -// pRes = taos_query(pConn, "create table tu using st1 tags(1)"); -// if (taos_errno(pRes) != 0) { -// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes)); -// } -// taos_free_result(pRes); -// -// for(int32_t i = 0; i < 1; ++i) { -// printf("create table :%d\n", i); -// createNewTable(pConn, i); -// } + pRes = taos_query(pConn, "create stable st1 (ts timestamp, k int) tags(a int)"); + if (taos_errno(pRes) != 0) { + printf("failed to create table tu, reason:%s\n", taos_errstr(pRes)); + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int) tags(a int)"); + if (taos_errno(pRes) != 0) { + printf("failed to create table tu, reason:%s\n", taos_errstr(pRes)); + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "create table tu using st1 tags(1)"); + if (taos_errno(pRes) != 0) { + printf("failed to create table tu, reason:%s\n", taos_errstr(pRes)); + } + taos_free_result(pRes); + + for(int32_t i = 0; i < 2; ++i) { + printf("create table :%d\n", i); + createNewTable(pConn, i); + } // // pRes = taos_query(pConn, "select * from tu"); // if (taos_errno(pRes) != 0) { diff --git a/source/client/test/smlTest.cpp b/source/client/test/smlTest.cpp index 68a8b9d336ae49e34c3dab28d3fdad6d3f27e9d4..b62238ccf26c991a516313270889a05a5b87d6ee 100644 --- a/source/client/test/smlTest.cpp +++ b/source/client/test/smlTest.cpp @@ -692,3 +692,52 @@ TEST(testCase, smlParseTelnetLine_diff_json_type2_Test) { ASSERT_NE(ret, 0); smlDestroyInfo(info); } + +TEST(testCase, sml_col_4096_Test) { + SSmlHandle *info = smlBuildSmlInfo(NULL, NULL, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS); + ASSERT_NE(info, nullptr); + + const char *sql[] = { + "spgwgvldxv,id=spgwgvldxv_1,t0=f c0=t,c1=t,c2=t,c3=t,c4=t,c5=t,c6=t,c7=t,c8=t,c9=t,c10=t,c11=t,c12=t,c13=t,c14=t,c15=t,c16=t,c17=t,c18=t,c19=t,c20=t,c21=t,c22=t,c23=t,c24=t,c25=t,c26=t,c27=t,c28=t,c29=t,c30=t,c31=t,c32=t,c33=t,c34=t,c35=t,c36=t,c37=t,c38=t,c39=t,c40=t,c41=t,c42=t,c43=t,c44=t,c45=t,c46=t,c47=t,c48=t,c49=t,c50=t,c51=t,c52=t,c53=t,c54=t,c55=t,c56=t,c57=t,c58=t,c59=t,c60=t,c61=t,c62=t,c63=t,c64=t,c65=t,c66=t,c67=t,c68=t,c69=t,c70=t,c71=t,c72=t,c73=t,c74=t,c75=t,c76=t,c77=t,c78=t,c79=t,c80=t,c81=t,c82=t,c83=t,c84=t,c85=t,c86=t,c87=t,c88=t,c89=t,c90=t,c91=t,c92=t,c93=t,c94=t,c95=t,c96=t,c97=t,c98=t,c99=t,c100=t," + "c101=t,c102=t,c103=t,c104=t,c105=t,c106=t,c107=t,c108=t,c109=t,c110=t,c111=t,c112=t,c113=t,c114=t,c115=t,c116=t,c117=t,c118=t,c119=t,c120=t,c121=t,c122=t,c123=t,c124=t,c125=t,c126=t,c127=t,c128=t,c129=t,c130=t,c131=t,c132=t,c133=t,c134=t,c135=t,c136=t,c137=t,c138=t,c139=t,c140=t,c141=t,c142=t,c143=t,c144=t,c145=t,c146=t,c147=t,c148=t,c149=t,c150=t,c151=t,c152=t,c153=t,c154=t,c155=t,c156=t,c157=t,c158=t,c159=t,c160=t,c161=t,c162=t,c163=t,c164=t,c165=t,c166=t,c167=t,c168=t,c169=t,c170=t,c171=t,c172=t,c173=t,c174=t,c175=t,c176=t,c177=t,c178=t,c179=t,c180=t,c181=t,c182=t,c183=t,c184=t,c185=t,c186=t,c187=t,c188=t,c189=t," + "c190=t,c191=t,c192=t,c193=t,c194=t,c195=t,c196=t,c197=t,c198=t,c199=t,c200=t,c201=t,c202=t,c203=t,c204=t,c205=t,c206=t,c207=t,c208=t,c209=t,c210=t,c211=t,c212=t,c213=t,c214=t,c215=t,c216=t,c217=t,c218=t,c219=t,c220=t,c221=t,c222=t,c223=t,c224=t,c225=t,c226=t,c227=t,c228=t,c229=t,c230=t,c231=t,c232=t,c233=t,c234=t,c235=t,c236=t,c237=t,c238=t,c239=t,c240=t,c241=t,c242=t,c243=t,c244=t,c245=t,c246=t,c247=t,c248=t,c249=t,c250=t,c251=t,c252=t,c253=t,c254=t,c255=t,c256=t,c257=t,c258=t,c259=t,c260=t,c261=t,c262=t,c263=t,c264=t,c265=t,c266=t,c267=t,c268=t,c269=t,c270=t,c271=t,c272=t,c273=t,c274=t,c275=t,c276=t,c277=t,c278=t," + "c279=t,c280=t,c281=t,c282=t,c283=t,c284=t,c285=t,c286=t,c287=t,c288=t,c289=t,c290=t,c291=t,c292=t,c293=t,c294=t,c295=t,c296=t,c297=t,c298=t,c299=t,c300=t,c301=t,c302=t,c303=t,c304=t,c305=t,c306=t,c307=t,c308=t,c309=t,c310=t,c311=t,c312=t,c313=t,c314=t,c315=t,c316=t,c317=t,c318=t,c319=t,c320=t,c321=t,c322=t,c323=t,c324=t,c325=t,c326=t,c327=t,c328=t,c329=t,c330=t,c331=t,c332=t,c333=t,c334=t,c335=t,c336=t,c337=t,c338=t,c339=t,c340=t,c341=t,c342=t,c343=t,c344=t,c345=t,c346=t,c347=t,c348=t,c349=t,c350=t,c351=t,c352=t,c353=t,c354=t,c355=t,c356=t,c357=t,c358=t,c359=t,c360=t,c361=t,c362=t,c363=t,c364=t,c365=t,c366=t,c367=t,c368=t,c369=t,c370=t,c371=t,c372=t,c373=t,c374=t,c375=t,c376=t,c377=t,c378=t,c379=t,c380=t,c381=t,c382=t,c383=t,c384=t,c385=t,c386=t,c387=t,c388=t,c389=t,c390=t,c391=t,c392=t,c393=t,c394=t,c395=t,c396=t,c397=t,c398=t,c399=t,c400=t,c401=t,c402=t,c403=t,c404=t,c405=t,c406=t,c407=t,c408=t,c409=t,c410=t,c411=t,c412=t,c413=t,c414=t,c415=t,c416=t,c417=t,c418=t,c419=t,c420=t,c421=t,c422=t,c423=t,c424=t,c425=t,c426=t,c427=t,c428=t,c429=t,c430=t,c431=t,c432=t,c433=t,c434=t,c435=t,c436=t,c437=t,c438=t,c439=t,c440=t,c441=t,c442=t,c443=t,c444=t,c445=t,c446=t," + "c447=t,c448=t,c449=t,c450=t,c451=t,c452=t,c453=t,c454=t,c455=t,c456=t,c457=t,c458=t,c459=t,c460=t,c461=t,c462=t,c463=t,c464=t,c465=t,c466=t,c467=t,c468=t,c469=t,c470=t,c471=t,c472=t,c473=t,c474=t,c475=t,c476=t,c477=t,c478=t,c479=t,c480=t,c481=t,c482=t,c483=t,c484=t,c485=t,c486=t,c487=t,c488=t,c489=t,c490=t,c491=t,c492=t,c493=t,c494=t,c495=t,c496=t,c497=t,c498=t,c499=t,c500=t,c501=t,c502=t,c503=t,c504=t,c505=t,c506=t,c507=t,c508=t,c509=t,c510=t,c511=t,c512=t,c513=t,c514=t,c515=t,c516=t,c517=t,c518=t,c519=t,c520=t,c521=t,c522=t,c523=t,c524=t,c525=t,c526=t,c527=t,c528=t,c529=t,c530=t,c531=t,c532=t,c533=t,c534=t,c535=t,c536=t,c537=t,c538=t,c539=t,c540=t,c541=t,c542=t,c543=t,c544=t,c545=t,c546=t,c547=t,c548=t,c549=t,c550=t,c551=t,c552=t,c553=t,c554=t,c555=t,c556=t,c557=t,c558=t,c559=t,c560=t,c561=t,c562=t,c563=t,c564=t,c565=t,c566=t,c567=t,c568=t,c569=t,c570=t,c571=t,c572=t,c573=t,c574=t,c575=t,c576=t,c577=t,c578=t,c579=t,c580=t,c581=t,c582=t,c583=t,c584=t,c585=t,c586=t,c587=t,c588=t,c589=t,c590=t,c591=t,c592=t,c593=t,c594=t,c595=t,c596=t,c597=t,c598=t,c599=t,c600=t,c601=t,c602=t,c603=t,c604=t,c605=t,c606=t,c607=t,c608=t,c609=t,c610=t,c611=t,c612=t,c613=t,c614=t," + "c615=t,c616=t,c617=t,c618=t,c619=t,c620=t,c621=t,c622=t,c623=t,c624=t,c625=t,c626=t,c627=t,c628=t,c629=t,c630=t,c631=t,c632=t,c633=t,c634=t,c635=t,c636=t,c637=t,c638=t,c639=t,c640=t,c641=t,c642=t,c643=t,c644=t,c645=t,c646=t,c647=t,c648=t,c649=t,c650=t,c651=t,c652=t,c653=t,c654=t,c655=t,c656=t,c657=t,c658=t,c659=t,c660=t,c661=t,c662=t,c663=t,c664=t,c665=t,c666=t,c667=t,c668=t,c669=t,c670=t,c671=t,c672=t,c673=t,c674=t,c675=t,c676=t,c677=t,c678=t,c679=t,c680=t,c681=t,c682=t,c683=t,c684=t,c685=t,c686=t,c687=t,c688=t,c689=t,c690=t,c691=t,c692=t,c693=t,c694=t,c695=t,c696=t,c697=t,c698=t,c699=t,c700=t,c701=t,c702=t,c703=t,c704=t,c705=t,c706=t,c707=t,c708=t,c709=t,c710=t,c711=t,c712=t,c713=t,c714=t,c715=t,c716=t,c717=t,c718=t,c719=t,c720=t,c721=t,c722=t,c723=t,c724=t,c725=t,c726=t,c727=t,c728=t,c729=t,c730=t,c731=t,c732=t,c733=t,c734=t,c735=t,c736=t,c737=t,c738=t,c739=t,c740=t,c741=t,c742=t,c743=t,c744=t,c745=t,c746=t,c747=t,c748=t,c749=t,c750=t,c751=t,c752=t,c753=t,c754=t,c755=t,c756=t,c757=t,c758=t,c759=t,c760=t,c761=t,c762=t,c763=t,c764=t,c765=t,c766=t,c767=t,c768=t,c769=t,c770=t,c771=t,c772=t,c773=t,c774=t,c775=t,c776=t,c777=t,c778=t,c779=t,c780=t,c781=t,c782=t," + "c783=t,c784=t,c785=t,c786=t,c787=t,c788=t,c789=t,c790=t,c791=t,c792=t,c793=t,c794=t,c795=t,c796=t,c797=t,c798=t,c799=t,c800=t,c801=t,c802=t,c803=t,c804=t,c805=t,c806=t,c807=t,c808=t,c809=t,c810=t,c811=t,c812=t,c813=t," + "c814=t,c815=t,c816=t,c817=t,c818=t,c819=t,c820=t,c821=t,c822=t,c823=t,c824=t,c825=t,c826=t,c827=t,c828=t,c829=t,c830=t,c831=t,c832=t,c833=t,c834=t,c835=t,c836=t,c837=t,c838=t,c839=t,c840=t,c841=t,c842=t,c843=t,c844=t,c845=t,c846=t,c847=t,c848=t,c849=t,c850=t,c851=t,c852=t,c853=t,c854=t,c855=t,c856=t,c857=t,c858=t,c859=t,c860=t,c861=t,c862=t," + "c863=t,c864=t,c865=t,c866=t,c867=t,c868=t,c869=t,c870=t,c871=t,c872=t,c873=t,c874=t,c875=t,c876=t,c877=t,c878=t,c879=t,c880=t,c881=t,c882=t,c883=t,c884=t,c885=t,c886=t,c887=t,c888=t,c889=t,c890=t,c891=t,c892=t,c893=t,c894=t,c895=t,c896=t,c897=t,c898=t,c899=t,c900=t,c901=t,c902=t,c903=t,c904=t,c905=t,c906=t,c907=t,c908=t,c909=t,c910=t,c911=t,c912=t,c913=t,c914=t,c915=t,c916=t,c917=t,c918=t,c919=t,c920=t,c921=t,c922=t,c923=t,c924=t,c925=t,c926=t,c927=t,c928=t,c929=t,c930=t,c931=t,c932=t,c933=t,c934=t,c935=t,c936=t,c937=t,c938=t,c939=t,c940=t,c941=t,c942=t,c943=t,c944=t,c945=t,c946=t,c947=t,c948=t,c949=t,c950=t,c951=t,c952=t,c953=t,c954=t,c955=t,c956=t,c957=t,c958=t,c959=t,c960=t,c961=t,c962=t,c963=t,c964=t,c965=t,c966=t,c967=t,c968=t,c969=t,c970=t,c971=t,c972=t,c973=t,c974=t,c975=t,c976=t,c977=t,c978=t,c979=t,c980=t,c981=t,c982=t,c983=t,c984=t,c985=t,c986=t,c987=t,c988=t,c989=t,c990=t,c991=t,c992=t,c993=t,c994=t,c995=t,c996=t,c997=t,c998=t,c999=t,c1000=t,c1001=t,c1002=t,c1003=t,c1004=t,c1005=t,c1006=t,c1007=t,c1008=t,c1009=t,c1010=t,c1011=t,c1012=t,c1013=t,c1014=t,c1015=t,c1016=t,c1017=t,c1018=t,c1019=t,c1020=t,c1021=t,c1022=t,c1023=t,c1024=t,c1025=t,c1026=t," + "c1027=t,c1028=t,c1029=t,c1030=t,c1031=t,c1032=t,c1033=t,c1034=t,c1035=t,c1036=t,c1037=t,c1038=t,c1039=t,c1040=t,c1041=t,c1042=t,c1043=t,c1044=t,c1045=t,c1046=t,c1047=t,c1048=t,c1049=t,c1050=t,c1051=t,c1052=t,c1053=t,c1054=t,c1055=t,c1056=t,c1057=t,c1058=t,c1059=t,c1060=t,c1061=t,c1062=t,c1063=t,c1064=t,c1065=t,c1066=t,c1067=t,c1068=t,c1069=t,c1070=t,c1071=t,c1072=t,c1073=t,c1074=t,c1075=t,c1076=t,c1077=t,c1078=t,c1079=t,c1080=t,c1081=t,c1082=t,c1083=t,c1084=t,c1085=t,c1086=t,c1087=t,c1088=t,c1089=t,c1090=t,c1091=t,c1092=t,c1093=t,c1094=t,c1095=t,c1096=t,c1097=t,c1098=t,c1099=t,c1100=t,c1101=t,c1102=t,c1103=t,c1104=t,c1105=t,c1106=t,c1107=t,c1108=t,c1109=t,c1110=t,c1111=t,c1112=t,c1113=t,c1114=t,c1115=t,c1116=t,c1117=t,c1118=t,c1119=t,c1120=t,c1121=t,c1122=t,c1123=t,c1124=t,c1125=t,c1126=t,c1127=t,c1128=t,c1129=t,c1130=t,c1131=t,c1132=t,c1133=t,c1134=t,c1135=t,c1136=t,c1137=t,c1138=t,c1139=t,c1140=t,c1141=t,c1142=t,c1143=t,c1144=t,c1145=t,c1146=t,c1147=t,c1148=t,c1149=t,c1150=t,c1151=t,c1152=t,c1153=t,c1154=t,c1155=t,c1156=t,c1157=t,c1158=t,c1159=t,c1160=t,c1161=t,c1162=t,c1163=t,c1164=t,c1165=t,c1166=t,c1167=t,c1168=t,c1169=t,c1170=t,c1171=t,c1172=t,c1173=t," + "c1174=t,c1175=t,c1176=t,c1177=t,c1178=t,c1179=t,c1180=t,c1181=t,c1182=t,c1183=t,c1184=t,c1185=t,c1186=t,c1187=t,c1188=t,c1189=t,c1190=t,c1191=t,c1192=t,c1193=t,c1194=t,c1195=t,c1196=t,c1197=t,c1198=t,c1199=t,c1200=t,c1201=t,c1202=t,c1203=t,c1204=t,c1205=t,c1206=t,c1207=t,c1208=t,c1209=t,c1210=t,c1211=t,c1212=t,c1213=t,c1214=t,c1215=t,c1216=t,c1217=t,c1218=t,c1219=t,c1220=t,c1221=t,c1222=t,c1223=t,c1224=t,c1225=t,c1226=t,c1227=t,c1228=t,c1229=t,c1230=t,c1231=t,c1232=t,c1233=t,c1234=t,c1235=t,c1236=t,c1237=t,c1238=t,c1239=t,c1240=t,c1241=t,c1242=t,c1243=t,c1244=t,c1245=t,c1246=t,c1247=t,c1248=t,c1249=t,c1250=t,c1251=t,c1252=t,c1253=t,c1254=t,c1255=t,c1256=t,c1257=t,c1258=t,c1259=t,c1260=t,c1261=t,c1262=t,c1263=t,c1264=t,c1265=t,c1266=t,c1267=t,c1268=t,c1269=t,c1270=t,c1271=t,c1272=t,c1273=t,c1274=t,c1275=t,c1276=t,c1277=t,c1278=t,c1279=t,c1280=t,c1281=t,c1282=t,c1283=t,c1284=t,c1285=t,c1286=t,c1287=t,c1288=t,c1289=t,c1290=t,c1291=t,c1292=t,c1293=t,c1294=t,c1295=t,c1296=t,c1297=t,c1298=t,c1299=t,c1300=t,c1301=t,c1302=t,c1303=t,c1304=t,c1305=t,c1306=t,c1307=t,c1308=t,c1309=t,c1310=t,c1311=t,c1312=t,c1313=t,c1314=t,c1315=t,c1316=t,c1317=t,c1318=t,c1319=t,c1320=t," + "c1321=t,c1322=t,c1323=t,c1324=t,c1325=t,c1326=t,c1327=t,c1328=t,c1329=t,c1330=t,c1331=t,c1332=t,c1333=t,c1334=t,c1335=t,c1336=t,c1337=t,c1338=t,c1339=t,c1340=t,c1341=t,c1342=t,c1343=t,c1344=t,c1345=t,c1346=t,c1347=t," + "c1348=t,c1349=t,c1350=t,c1351=t,c1352=t,c1353=t,c1354=t,c1355=t,c1356=t,c1357=t,c1358=t,c1359=t,c1360=t,c1361=t,c1362=t,c1363=t,c1364=t,c1365=t,c1366=t,c1367=t,c1368=t,c1369=t,c1370=t,c1371=t,c1372=t,c1373=t,c1374=t,c1375=t,c1376=t,c1377=t,c1378=t,c1379=t,c1380=t,c1381=t,c1382=t,c1383=t,c1384=t,c1385=t,c1386=t,c1387=t,c1388=t,c1389=t,c1390=t,c1391=t,c1392=t,c1393=t,c1394=t,c1395=t,c1396=t,c1397=t,c1398=t,c1399=t,c1400=t,c1401=t,c1402=t,c1403=t,c1404=t,c1405=t,c1406=t,c1407=t,c1408=t,c1409=t,c1410=t,c1411=t,c1412=t,c1413=t,c1414=t,c1415=t,c1416=t,c1417=t,c1418=t,c1419=t,c1420=t,c1421=t,c1422=t,c1423=t,c1424=t,c1425=t,c1426=t,c1427=t,c1428=t,c1429=t,c1430=t,c1431=t,c1432=t,c1433=t,c1434=t,c1435=t,c1436=t,c1437=t,c1438=t,c1439=t,c1440=t,c1441=t,c1442=t,c1443=t,c1444=t,c1445=t,c1446=t,c1447=t,c1448=t,c1449=t,c1450=t,c1451=t,c1452=t,c1453=t,c1454=t,c1455=t,c1456=t,c1457=t,c1458=t,c1459=t,c1460=t,c1461=t,c1462=t,c1463=t,c1464=t,c1465=t,c1466=t,c1467=t,c1468=t,c1469=t,c1470=t,c1471=t,c1472=t,c1473=t,c1474=t,c1475=t,c1476=t,c1477=t,c1478=t,c1479=t,c1480=t,c1481=t,c1482=t,c1483=t,c1484=t,c1485=t,c1486=t,c1487=t,c1488=t,c1489=t,c1490=t,c1491=t,c1492=t,c1493=t,c1494=t," + "c1495=t,c1496=t,c1497=t,c1498=t,c1499=t,c1500=t,c1501=t,c1502=t,c1503=t,c1504=t,c1505=t,c1506=t,c1507=t,c1508=t,c1509=t,c1510=t,c1511=t,c1512=t,c1513=t,c1514=t,c1515=t,c1516=t,c1517=t,c1518=t,c1519=t,c1520=t,c1521=t,c1522=t,c1523=t,c1524=t,c1525=t,c1526=t,c1527=t,c1528=t,c1529=t,c1530=t,c1531=t,c1532=t,c1533=t,c1534=t,c1535=t,c1536=t,c1537=t,c1538=t,c1539=t,c1540=t,c1541=t,c1542=t,c1543=t,c1544=t,c1545=t,c1546=t,c1547=t,c1548=t,c1549=t,c1550=t,c1551=t,c1552=t,c1553=t,c1554=t,c1555=t,c1556=t,c1557=t,c1558=t,c1559=t,c1560=t,c1561=t,c1562=t,c1563=t,c1564=t,c1565=t,c1566=t,c1567=t,c1568=t,c1569=t,c1570=t,c1571=t,c1572=t,c1573=t,c1574=t,c1575=t,c1576=t,c1577=t,c1578=t,c1579=t,c1580=t,c1581=t,c1582=t,c1583=t,c1584=t,c1585=t,c1586=t,c1587=t,c1588=t,c1589=t,c1590=t,c1591=t,c1592=t,c1593=t,c1594=t,c1595=t,c1596=t,c1597=t,c1598=t,c1599=t,c1600=t,c1601=t,c1602=t,c1603=t,c1604=t,c1605=t,c1606=t,c1607=t,c1608=t,c1609=t,c1610=t,c1611=t,c1612=t,c1613=t,c1614=t,c1615=t,c1616=t,c1617=t,c1618=t,c1619=t,c1620=t,c1621=t,c1622=t,c1623=t,c1624=t,c1625=t,c1626=t,c1627=t,c1628=t,c1629=t,c1630=t,c1631=t,c1632=t,c1633=t,c1634=t,c1635=t,c1636=t,c1637=t,c1638=t,c1639=t,c1640=t,c1641=t," + "c1642=t,c1643=t,c1644=t,c1645=t,c1646=t,c1647=t,c1648=t,c1649=t,c1650=t,c1651=t,c1652=t,c1653=t,c1654=t,c1655=t,c1656=t,c1657=t,c1658=t,c1659=t,c1660=t,c1661=t,c1662=t,c1663=t,c1664=t,c1665=t,c1666=t,c1667=t,c1668=t,c1669=t,c1670=t,c1671=t,c1672=t,c1673=t,c1674=t,c1675=t,c1676=t,c1677=t,c1678=t,c1679=t,c1680=t,c1681=t,c1682=t,c1683=t,c1684=t,c1685=t,c1686=t,c1687=t,c1688=t,c1689=t,c1690=t,c1691=t,c1692=t,c1693=t,c1694=t,c1695=t,c1696=t,c1697=t,c1698=t,c1699=t,c1700=t,c1701=t,c1702=t,c1703=t,c1704=t,c1705=t,c1706=t,c1707=t,c1708=t,c1709=t,c1710=t,c1711=t,c1712=t,c1713=t,c1714=t,c1715=t,c1716=t,c1717=t,c1718=t,c1719=t,c1720=t,c1721=t,c1722=t,c1723=t,c1724=t,c1725=t,c1726=t,c1727=t,c1728=t,c1729=t,c1730=t,c1731=t,c1732=t,c1733=t,c1734=t,c1735=t,c1736=t,c1737=t,c1738=t,c1739=t,c1740=t,c1741=t,c1742=t,c1743=t,c1744=t,c1745=t,c1746=t,c1747=t,c1748=t,c1749=t,c1750=t,c1751=t,c1752=t,c1753=t,c1754=t,c1755=t,c1756=t,c1757=t,c1758=t,c1759=t,c1760=t,c1761=t,c1762=t,c1763=t,c1764=t,c1765=t,c1766=t,c1767=t,c1768=t,c1769=t,c1770=t,c1771=t,c1772=t,c1773=t,c1774=t,c1775=t,c1776=t,c1777=t,c1778=t,c1779=t,c1780=t,c1781=t,c1782=t,c1783=t,c1784=t,c1785=t,c1786=t,c1787=t,c1788=t," + "c1789=t,c1790=t,c1791=t,c1792=t,c1793=t,c1794=t,c1795=t,c1796=t,c1797=t,c1798=t,c1799=t,c1800=t,c1801=t,c1802=t,c1803=t,c1804=t,c1805=t,c1806=t,c1807=t,c1808=t,c1809=t,c1810=t,c1811=t,c1812=t,c1813=t,c1814=t,c1815=t," + "c1816=t,c1817=t,c1818=t,c1819=t,c1820=t,c1821=t,c1822=t,c1823=t,c1824=t,c1825=t,c1826=t,c1827=t,c1828=t,c1829=t,c1830=t,c1831=t,c1832=t,c1833=t,c1834=t,c1835=t,c1836=t,c1837=t,c1838=t,c1839=t,c1840=t,c1841=t,c1842=t,c1843=t,c1844=t,c1845=t,c1846=t,c1847=t,c1848=t,c1849=t,c1850=t,c1851=t,c1852=t,c1853=t,c1854=t,c1855=t,c1856=t,c1857=t,c1858=t,c1859=t,c1860=t,c1861=t,c1862=t,c1863=t,c1864=t,c1865=t,c1866=t,c1867=t,c1868=t,c1869=t,c1870=t,c1871=t,c1872=t,c1873=t,c1874=t,c1875=t,c1876=t,c1877=t,c1878=t,c1879=t,c1880=t,c1881=t,c1882=t,c1883=t,c1884=t,c1885=t,c1886=t,c1887=t,c1888=t,c1889=t,c1890=t,c1891=t,c1892=t,c1893=t,c1894=t,c1895=t,c1896=t,c1897=t,c1898=t,c1899=t,c1900=t,c1901=t,c1902=t,c1903=t,c1904=t,c1905=t,c1906=t,c1907=t,c1908=t,c1909=t,c1910=t,c1911=t,c1912=t,c1913=t,c1914=t,c1915=t,c1916=t,c1917=t,c1918=t,c1919=t,c1920=t,c1921=t,c1922=t,c1923=t,c1924=t,c1925=t,c1926=t,c1927=t,c1928=t,c1929=t,c1930=t,c1931=t,c1932=t,c1933=t,c1934=t,c1935=t,c1936=t,c1937=t,c1938=t,c1939=t,c1940=t,c1941=t,c1942=t,c1943=t,c1944=t,c1945=t,c1946=t,c1947=t,c1948=t,c1949=t,c1950=t,c1951=t,c1952=t,c1953=t,c1954=t,c1955=t,c1956=t,c1957=t,c1958=t,c1959=t,c1960=t,c1961=t,c1962=t," + "c1963=t,c1964=t,c1965=t,c1966=t,c1967=t,c1968=t,c1969=t,c1970=t,c1971=t,c1972=t,c1973=t,c1974=t,c1975=t,c1976=t,c1977=t,c1978=t,c1979=t,c1980=t,c1981=t,c1982=t,c1983=t,c1984=t,c1985=t,c1986=t,c1987=t,c1988=t,c1989=t,c1990=t,c1991=t,c1992=t,c1993=t,c1994=t,c1995=t,c1996=t,c1997=t,c1998=t,c1999=t,c2000=t,c2001=t,c2002=t,c2003=t,c2004=t,c2005=t,c2006=t,c2007=t,c2008=t,c2009=t,c2010=t,c2011=t,c2012=t,c2013=t,c2014=t,c2015=t,c2016=t,c2017=t,c2018=t,c2019=t,c2020=t,c2021=t,c2022=t,c2023=t,c2024=t,c2025=t,c2026=t,c2027=t,c2028=t,c2029=t,c2030=t,c2031=t,c2032=t,c2033=t,c2034=t,c2035=t,c2036=t,c2037=t,c2038=t,c2039=t,c2040=t,c2041=t,c2042=t,c2043=t,c2044=t,c2045=t,c2046=t,c2047=t,c2048=t,c2049=t,c2050=t,c2051=t,c2052=t,c2053=t,c2054=t,c2055=t,c2056=t,c2057=t,c2058=t,c2059=t,c2060=t,c2061=t,c2062=t,c2063=t,c2064=t,c2065=t,c2066=t,c2067=t,c2068=t,c2069=t,c2070=t,c2071=t,c2072=t,c2073=t,c2074=t,c2075=t,c2076=t,c2077=t,c2078=t,c2079=t,c2080=t,c2081=t,c2082=t,c2083=t,c2084=t,c2085=t,c2086=t,c2087=t,c2088=t,c2089=t,c2090=t,c2091=t,c2092=t,c2093=t,c2094=t,c2095=t,c2096=t,c2097=t,c2098=t,c2099=t,c2100=t,c2101=t,c2102=t,c2103=t,c2104=t,c2105=t,c2106=t,c2107=t,c2108=t,c2109=t," + "c2110=t,c2111=t,c2112=t,c2113=t,c2114=t,c2115=t,c2116=t,c2117=t,c2118=t,c2119=t,c2120=t,c2121=t,c2122=t,c2123=t,c2124=t,c2125=t,c2126=t,c2127=t,c2128=t,c2129=t,c2130=t,c2131=t,c2132=t,c2133=t,c2134=t,c2135=t,c2136=t,c2137=t,c2138=t,c2139=t,c2140=t,c2141=t,c2142=t,c2143=t,c2144=t,c2145=t,c2146=t,c2147=t,c2148=t,c2149=t,c2150=t,c2151=t,c2152=t,c2153=t,c2154=t,c2155=t,c2156=t,c2157=t,c2158=t,c2159=t,c2160=t,c2161=t,c2162=t,c2163=t,c2164=t,c2165=t,c2166=t,c2167=t,c2168=t,c2169=t,c2170=t,c2171=t,c2172=t,c2173=t,c2174=t,c2175=t,c2176=t,c2177=t,c2178=t,c2179=t,c2180=t,c2181=t,c2182=t,c2183=t,c2184=t,c2185=t,c2186=t,c2187=t,c2188=t,c2189=t,c2190=t,c2191=t,c2192=t,c2193=t,c2194=t,c2195=t,c2196=t,c2197=t,c2198=t,c2199=t,c2200=t,c2201=t,c2202=t,c2203=t,c2204=t,c2205=t,c2206=t,c2207=t,c2208=t,c2209=t,c2210=t,c2211=t,c2212=t,c2213=t,c2214=t,c2215=t,c2216=t,c2217=t,c2218=t,c2219=t,c2220=t,c2221=t,c2222=t,c2223=t,c2224=t,c2225=t,c2226=t,c2227=t,c2228=t,c2229=t,c2230=t,c2231=t,c2232=t,c2233=t,c2234=t,c2235=t,c2236=t,c2237=t,c2238=t,c2239=t,c2240=t,c2241=t,c2242=t,c2243=t,c2244=t,c2245=t,c2246=t,c2247=t,c2248=t,c2249=t,c2250=t,c2251=t,c2252=t,c2253=t,c2254=t,c2255=t,c2256=t," + "c2257=t,c2258=t,c2259=t,c2260=t,c2261=t,c2262=t,c2263=t,c2264=t,c2265=t,c2266=t,c2267=t,c2268=t,c2269=t,c2270=t,c2271=t,c2272=t,c2273=t,c2274=t,c2275=t,c2276=t,c2277=t,c2278=t,c2279=t,c2280=t,c2281=t,c2282=t,c2283=t," + "c2284=t,c2285=t,c2286=t,c2287=t,c2288=t,c2289=t,c2290=t,c2291=t,c2292=t,c2293=t,c2294=t,c2295=t,c2296=t,c2297=t,c2298=t,c2299=t,c2300=t,c2301=t,c2302=t,c2303=t,c2304=t,c2305=t,c2306=t,c2307=t,c2308=t,c2309=t,c2310=t,c2311=t,c2312=t,c2313=t,c2314=t,c2315=t,c2316=t,c2317=t,c2318=t,c2319=t,c2320=t,c2321=t,c2322=t,c2323=t,c2324=t,c2325=t,c2326=t,c2327=t,c2328=t,c2329=t,c2330=t,c2331=t,c2332=t,c2333=t,c2334=t,c2335=t,c2336=t,c2337=t,c2338=t,c2339=t,c2340=t,c2341=t,c2342=t,c2343=t,c2344=t,c2345=t,c2346=t,c2347=t,c2348=t,c2349=t,c2350=t,c2351=t,c2352=t,c2353=t,c2354=t,c2355=t,c2356=t,c2357=t,c2358=t,c2359=t,c2360=t,c2361=t,c2362=t,c2363=t,c2364=t,c2365=t,c2366=t,c2367=t,c2368=t,c2369=t,c2370=t,c2371=t,c2372=t,c2373=t,c2374=t,c2375=t,c2376=t,c2377=t,c2378=t,c2379=t,c2380=t,c2381=t,c2382=t,c2383=t,c2384=t,c2385=t,c2386=t,c2387=t,c2388=t,c2389=t,c2390=t,c2391=t,c2392=t,c2393=t,c2394=t,c2395=t,c2396=t,c2397=t,c2398=t,c2399=t,c2400=t,c2401=t,c2402=t,c2403=t,c2404=t,c2405=t,c2406=t,c2407=t,c2408=t,c2409=t,c2410=t,c2411=t,c2412=t,c2413=t,c2414=t,c2415=t,c2416=t,c2417=t,c2418=t,c2419=t,c2420=t,c2421=t,c2422=t,c2423=t,c2424=t,c2425=t,c2426=t,c2427=t,c2428=t,c2429=t,c2430=t," + "c2431=t,c2432=t,c2433=t,c2434=t,c2435=t,c2436=t,c2437=t,c2438=t,c2439=t,c2440=t,c2441=t,c2442=t,c2443=t,c2444=t,c2445=t,c2446=t,c2447=t,c2448=t,c2449=t,c2450=t,c2451=t,c2452=t,c2453=t,c2454=t,c2455=t,c2456=t,c2457=t,c2458=t,c2459=t,c2460=t,c2461=t,c2462=t,c2463=t,c2464=t,c2465=t,c2466=t,c2467=t,c2468=t,c2469=t,c2470=t,c2471=t,c2472=t,c2473=t,c2474=t,c2475=t,c2476=t,c2477=t,c2478=t,c2479=t,c2480=t,c2481=t,c2482=t,c2483=t,c2484=t,c2485=t,c2486=t,c2487=t,c2488=t,c2489=t,c2490=t,c2491=t,c2492=t,c2493=t,c2494=t,c2495=t,c2496=t,c2497=t,c2498=t,c2499=t,c2500=t,c2501=t,c2502=t,c2503=t,c2504=t,c2505=t,c2506=t,c2507=t,c2508=t,c2509=t,c2510=t,c2511=t,c2512=t,c2513=t,c2514=t,c2515=t,c2516=t,c2517=t,c2518=t,c2519=t,c2520=t,c2521=t,c2522=t,c2523=t,c2524=t,c2525=t,c2526=t,c2527=t,c2528=t,c2529=t,c2530=t,c2531=t,c2532=t,c2533=t,c2534=t,c2535=t,c2536=t,c2537=t,c2538=t,c2539=t,c2540=t,c2541=t,c2542=t,c2543=t,c2544=t,c2545=t,c2546=t,c2547=t,c2548=t,c2549=t,c2550=t,c2551=t,c2552=t,c2553=t,c2554=t,c2555=t,c2556=t,c2557=t,c2558=t,c2559=t,c2560=t,c2561=t,c2562=t,c2563=t,c2564=t,c2565=t,c2566=t,c2567=t,c2568=t,c2569=t,c2570=t,c2571=t,c2572=t,c2573=t,c2574=t,c2575=t,c2576=t,c2577=t," + "c2578=t,c2579=t,c2580=t,c2581=t,c2582=t,c2583=t,c2584=t,c2585=t,c2586=t,c2587=t,c2588=t,c2589=t,c2590=t,c2591=t,c2592=t,c2593=t,c2594=t,c2595=t,c2596=t,c2597=t,c2598=t,c2599=t,c2600=t,c2601=t,c2602=t,c2603=t,c2604=t,c2605=t,c2606=t,c2607=t,c2608=t,c2609=t,c2610=t,c2611=t,c2612=t,c2613=t,c2614=t,c2615=t,c2616=t,c2617=t,c2618=t,c2619=t,c2620=t,c2621=t,c2622=t,c2623=t,c2624=t,c2625=t,c2626=t,c2627=t,c2628=t,c2629=t,c2630=t,c2631=t,c2632=t,c2633=t,c2634=t,c2635=t,c2636=t,c2637=t,c2638=t,c2639=t,c2640=t,c2641=t,c2642=t,c2643=t,c2644=t,c2645=t,c2646=t,c2647=t,c2648=t,c2649=t,c2650=t,c2651=t,c2652=t,c2653=t,c2654=t,c2655=t,c2656=t,c2657=t,c2658=t,c2659=t,c2660=t,c2661=t,c2662=t,c2663=t,c2664=t,c2665=t,c2666=t,c2667=t,c2668=t,c2669=t,c2670=t,c2671=t,c2672=t,c2673=t,c2674=t,c2675=t,c2676=t,c2677=t,c2678=t,c2679=t,c2680=t,c2681=t,c2682=t,c2683=t,c2684=t,c2685=t,c2686=t,c2687=t,c2688=t,c2689=t,c2690=t,c2691=t,c2692=t,c2693=t,c2694=t,c2695=t,c2696=t,c2697=t,c2698=t,c2699=t,c2700=t,c2701=t,c2702=t,c2703=t,c2704=t,c2705=t,c2706=t,c2707=t,c2708=t,c2709=t,c2710=t,c2711=t,c2712=t,c2713=t,c2714=t,c2715=t,c2716=t,c2717=t,c2718=t,c2719=t,c2720=t,c2721=t,c2722=t,c2723=t,c2724=t," + "c2725=t,c2726=t,c2727=t,c2728=t,c2729=t,c2730=t,c2731=t,c2732=t,c2733=t,c2734=t,c2735=t,c2736=t,c2737=t,c2738=t,c2739=t,c2740=t,c2741=t,c2742=t,c2743=t,c2744=t,c2745=t,c2746=t,c2747=t,c2748=t,c2749=t,c2750=t,c2751=t,c2752=t,c2753=t,c2754=t,c2755=t,c2756=t,c2757=t,c2758=t,c2759=t,c2760=t,c2761=t,c2762=t,c2763=t,c2764=t,c2765=t,c2766=t,c2767=t,c2768=t,c2769=t,c2770=t,c2771=t,c2772=t,c2773=t,c2774=t,c2775=t,c2776=t,c2777=t,c2778=t,c2779=t,c2780=t,c2781=t,c2782=t,c2783=t,c2784=t,c2785=t,c2786=t,c2787=t,c2788=t,c2789=t,c2790=t,c2791=t,c2792=t,c2793=t,c2794=t,c2795=t,c2796=t,c2797=t,c2798=t,c2799=t,c2800=t,c2801=t,c2802=t,c2803=t,c2804=t,c2805=t,c2806=t,c2807=t,c2808=t,c2809=t,c2810=t,c2811=t,c2812=t,c2813=t,c2814=t,c2815=t,c2816=t,c2817=t,c2818=t,c2819=t,c2820=t,c2821=t,c2822=t,c2823=t,c2824=t,c2825=t,c2826=t,c2827=t,c2828=t,c2829=t,c2830=t,c2831=t,c2832=t,c2833=t,c2834=t,c2835=t,c2836=t,c2837=t,c2838=t,c2839=t,c2840=t,c2841=t,c2842=t,c2843=t,c2844=t,c2845=t,c2846=t,c2847=t,c2848=t,c2849=t,c2850=t,c2851=t,c2852=t,c2853=t,c2854=t,c2855=t,c2856=t,c2857=t,c2858=t,c2859=t,c2860=t,c2861=t,c2862=t,c2863=t,c2864=t,c2865=t,c2866=t,c2867=t,c2868=t,c2869=t,c2870=t,c2871=t," + "c2872=t,c2873=t,c2874=t,c2875=t,c2876=t,c2877=t,c2878=t,c2879=t,c2880=t,c2881=t,c2882=t,c2883=t,c2884=t,c2885=t,c2886=t,c2887=t,c2888=t,c2889=t,c2890=t,c2891=t,c2892=t,c2893=t,c2894=t,c2895=t,c2896=t,c2897=t,c2898=t,c2899=t,c2900=t,c2901=t,c2902=t,c2903=t,c2904=t,c2905=t,c2906=t,c2907=t,c2908=t,c2909=t,c2910=t,c2911=t,c2912=t,c2913=t,c2914=t,c2915=t,c2916=t,c2917=t,c2918=t,c2919=t,c2920=t,c2921=t,c2922=t,c2923=t,c2924=t,c2925=t,c2926=t,c2927=t,c2928=t,c2929=t,c2930=t,c2931=t,c2932=t,c2933=t,c2934=t,c2935=t,c2936=t,c2937=t,c2938=t,c2939=t,c2940=t,c2941=t,c2942=t,c2943=t,c2944=t,c2945=t,c2946=t,c2947=t,c2948=t,c2949=t,c2950=t,c2951=t,c2952=t,c2953=t,c2954=t,c2955=t,c2956=t,c2957=t,c2958=t,c2959=t,c2960=t,c2961=t,c2962=t,c2963=t,c2964=t,c2965=t,c2966=t,c2967=t,c2968=t,c2969=t,c2970=t,c2971=t,c2972=t,c2973=t,c2974=t,c2975=t,c2976=t,c2977=t,c2978=t,c2979=t,c2980=t,c2981=t,c2982=t,c2983=t,c2984=t,c2985=t,c2986=t,c2987=t,c2988=t,c2989=t,c2990=t,c2991=t,c2992=t,c2993=t,c2994=t,c2995=t,c2996=t,c2997=t,c2998=t,c2999=t,c3000=t,c3001=t,c3002=t,c3003=t,c3004=t,c3005=t,c3006=t,c3007=t,c3008=t,c3009=t,c3010=t,c3011=t,c3012=t,c3013=t,c3014=t,c3015=t,c3016=t,c3017=t,c3018=t," + "c3019=t,c3020=t,c3021=t,c3022=t,c3023=t,c3024=t,c3025=t,c3026=t,c3027=t,c3028=t,c3029=t,c3030=t,c3031=t,c3032=t,c3033=t,c3034=t,c3035=t,c3036=t,c3037=t,c3038=t,c3039=t,c3040=t,c3041=t,c3042=t,c3043=t,c3044=t,c3045=t,c3046=t,c3047=t,c3048=t,c3049=t,c3050=t,c3051=t,c3052=t,c3053=t,c3054=t,c3055=t,c3056=t,c3057=t,c3058=t,c3059=t,c3060=t,c3061=t,c3062=t,c3063=t,c3064=t,c3065=t,c3066=t,c3067=t,c3068=t,c3069=t,c3070=t,c3071=t,c3072=t,c3073=t,c3074=t,c3075=t,c3076=t,c3077=t,c3078=t,c3079=t,c3080=t,c3081=t,c3082=t,c3083=t,c3084=t,c3085=t,c3086=t,c3087=t,c3088=t,c3089=t,c3090=t,c3091=t,c3092=t,c3093=t,c3094=t,c3095=t,c3096=t,c3097=t,c3098=t,c3099=t,c3100=t,c3101=t,c3102=t,c3103=t,c3104=t,c3105=t,c3106=t,c3107=t,c3108=t,c3109=t,c3110=t,c3111=t,c3112=t,c3113=t,c3114=t,c3115=t,c3116=t,c3117=t,c3118=t,c3119=t,c3120=t,c3121=t,c3122=t,c3123=t,c3124=t,c3125=t,c3126=t,c3127=t,c3128=t,c3129=t,c3130=t,c3131=t,c3132=t,c3133=t,c3134=t,c3135=t,c3136=t,c3137=t,c3138=t,c3139=t,c3140=t,c3141=t,c3142=t,c3143=t,c3144=t,c3145=t,c3146=t,c3147=t,c3148=t,c3149=t,c3150=t,c3151=t,c3152=t,c3153=t,c3154=t,c3155=t,c3156=t,c3157=t,c3158=t,c3159=t,c3160=t,c3161=t,c3162=t,c3163=t,c3164=t,c3165=t," + "c3166=t,c3167=t,c3168=t,c3169=t,c3170=t,c3171=t,c3172=t,c3173=t,c3174=t,c3175=t,c3176=t,c3177=t,c3178=t,c3179=t,c3180=t,c3181=t,c3182=t,c3183=t,c3184=t,c3185=t,c3186=t,c3187=t,c3188=t,c3189=t,c3190=t,c3191=t,c3192=t,c3193=t,c3194=t,c3195=t,c3196=t,c3197=t,c3198=t,c3199=t,c3200=t,c3201=t,c3202=t,c3203=t,c3204=t,c3205=t,c3206=t,c3207=t,c3208=t,c3209=t,c3210=t,c3211=t,c3212=t,c3213=t,c3214=t,c3215=t,c3216=t,c3217=t,c3218=t,c3219=t,c3220=t,c3221=t,c3222=t,c3223=t,c3224=t,c3225=t,c3226=t,c3227=t,c3228=t,c3229=t,c3230=t,c3231=t,c3232=t,c3233=t,c3234=t,c3235=t,c3236=t,c3237=t,c3238=t,c3239=t,c3240=t,c3241=t,c3242=t,c3243=t,c3244=t,c3245=t,c3246=t,c3247=t,c3248=t,c3249=t,c3250=t,c3251=t,c3252=t,c3253=t,c3254=t,c3255=t,c3256=t,c3257=t,c3258=t,c3259=t,c3260=t,c3261=t,c3262=t,c3263=t,c3264=t,c3265=t,c3266=t,c3267=t,c3268=t,c3269=t,c3270=t,c3271=t,c3272=t,c3273=t,c3274=t,c3275=t,c3276=t,c3277=t,c3278=t,c3279=t,c3280=t,c3281=t,c3282=t,c3283=t,c3284=t,c3285=t,c3286=t,c3287=t,c3288=t,c3289=t,c3290=t,c3291=t,c3292=t,c3293=t,c3294=t,c3295=t,c3296=t,c3297=t,c3298=t,c3299=t,c3300=t,c3301=t,c3302=t,c3303=t,c3304=t,c3305=t,c3306=t,c3307=t,c3308=t,c3309=t,c3310=t,c3311=t,c3312=t," + "c3313=t,c3314=t,c3315=t,c3316=t,c3317=t,c3318=t,c3319=t,c3320=t,c3321=t,c3322=t,c3323=t,c3324=t,c3325=t,c3326=t,c3327=t,c3328=t,c3329=t,c3330=t,c3331=t,c3332=t,c3333=t,c3334=t,c3335=t,c3336=t,c3337=t,c3338=t,c3339=t,c3340=t,c3341=t,c3342=t,c3343=t,c3344=t,c3345=t,c3346=t,c3347=t,c3348=t,c3349=t,c3350=t,c3351=t,c3352=t,c3353=t,c3354=t,c3355=t,c3356=t,c3357=t,c3358=t,c3359=t,c3360=t,c3361=t,c3362=t,c3363=t,c3364=t,c3365=t,c3366=t,c3367=t,c3368=t,c3369=t,c3370=t,c3371=t,c3372=t,c3373=t,c3374=t,c3375=t,c3376=t,c3377=t,c3378=t,c3379=t,c3380=t,c3381=t,c3382=t,c3383=t,c3384=t,c3385=t,c3386=t,c3387=t,c3388=t,c3389=t,c3390=t,c3391=t,c3392=t,c3393=t,c3394=t,c3395=t,c3396=t,c3397=t,c3398=t,c3399=t,c3400=t,c3401=t,c3402=t,c3403=t,c3404=t,c3405=t,c3406=t,c3407=t,c3408=t,c3409=t,c3410=t,c3411=t,c3412=t,c3413=t,c3414=t,c3415=t,c3416=t,c3417=t,c3418=t,c3419=t,c3420=t,c3421=t,c3422=t,c3423=t,c3424=t,c3425=t,c3426=t,c3427=t,c3428=t,c3429=t,c3430=t,c3431=t,c3432=t,c3433=t,c3434=t,c3435=t,c3436=t,c3437=t,c3438=t,c3439=t,c3440=t,c3441=t,c3442=t,c3443=t,c3444=t,c3445=t,c3446=t,c3447=t,c3448=t,c3449=t,c3450=t,c3451=t,c3452=t,c3453=t,c3454=t,c3455=t,c3456=t,c3457=t,c3458=t,c3459=t," + "c3460=t,c3461=t,c3462=t,c3463=t,c3464=t,c3465=t,c3466=t,c3467=t,c3468=t,c3469=t,c3470=t,c3471=t,c3472=t,c3473=t,c3474=t,c3475=t,c3476=t,c3477=t,c3478=t,c3479=t,c3480=t,c3481=t,c3482=t,c3483=t,c3484=t,c3485=t,c3486=t,c3487=t,c3488=t,c3489=t,c3490=t,c3491=t,c3492=t,c3493=t,c3494=t,c3495=t,c3496=t,c3497=t,c3498=t,c3499=t,c3500=t,c3501=t,c3502=t,c3503=t,c3504=t,c3505=t,c3506=t,c3507=t,c3508=t,c3509=t,c3510=t,c3511=t,c3512=t,c3513=t," + "c3514=t,c3515=t,c3516=t,c3517=t,c3518=t,c3519=t,c3520=t,c3521=t,c3522=t,c3523=t,c3524=t,c3525=t,c3526=t,c3527=t,c3528=t,c3529=t,c3530=t,c3531=t,c3532=t,c3533=t,c3534=t,c3535=t,c3536=t,c3537=t,c3538=t,c3539=t,c3540=t,c3541=t,c3542=t,c3543=t,c3544=t,c3545=t,c3546=t,c3547=t,c3548=t,c3549=t,c3550=t,c3551=t,c3552=t,c3553=t,c3554=t,c3555=t,c3556=t,c3557=t,c3558=t,c3559=t,c3560=t,c3561=t,c3562=t,c3563=t,c3564=t,c3565=t,c3566=t,c3567=t,c3568=t,c3569=t,c3570=t,c3571=t,c3572=t,c3573=t,c3574=t,c3575=t,c3576=t,c3577=t,c3578=t,c3579=t,c3580=t,c3581=t,c3582=t,c3583=t,c3584=t,c3585=t,c3586=t,c3587=t,c3588=t,c3589=t,c3590=t,c3591=t,c3592=t,c3593=t,c3594=t,c3595=t,c3596=t,c3597=t,c3598=t,c3599=t,c3600=t,c3601=t,c3602=t,c3603=t,c3604=t,c3605=t,c3606=t,c3607=t,c3608=t,c3609=t,c3610=t,c3611=t,c3612=t,c3613=t,c3614=t,c3615=t,c3616=t,c3617=t,c3618=t,c3619=t,c3620=t,c3621=t,c3622=t,c3623=t,c3624=t,c3625=t,c3626=t,c3627=t,c3628=t,c3629=t,c3630=t,c3631=t,c3632=t,c3633=t,c3634=t,c3635=t,c3636=t,c3637=t,c3638=t,c3639=t,c3640=t,c3641=t,c3642=t,c3643=t,c3644=t,c3645=t,c3646=t,c3647=t,c3648=t,c3649=t,c3650=t,c3651=t,c3652=t,c3653=t,c3654=t,c3655=t,c3656=t,c3657=t,c3658=t,c3659=t,c3660=t," + "c3661=t,c3662=t,c3663=t,c3664=t,c3665=t,c3666=t,c3667=t,c3668=t,c3669=t,c3670=t,c3671=t,c3672=t,c3673=t,c3674=t,c3675=t,c3676=t,c3677=t,c3678=t,c3679=t,c3680=t,c3681=t,c3682=t,c3683=t,c3684=t,c3685=t,c3686=t,c3687=t,c3688=t,c3689=t,c3690=t,c3691=t,c3692=t,c3693=t,c3694=t,c3695=t,c3696=t,c3697=t,c3698=t,c3699=t,c3700=t,c3701=t,c3702=t,c3703=t,c3704=t,c3705=t,c3706=t,c3707=t,c3708=t,c3709=t,c3710=t,c3711=t,c3712=t,c3713=t,c3714=t,c3715=t,c3716=t,c3717=t,c3718=t,c3719=t,c3720=t,c3721=t,c3722=t,c3723=t,c3724=t,c3725=t,c3726=t,c3727=t,c3728=t,c3729=t,c3730=t,c3731=t,c3732=t,c3733=t,c3734=t,c3735=t,c3736=t,c3737=t,c3738=t,c3739=t,c3740=t,c3741=t,c3742=t,c3743=t,c3744=t,c3745=t,c3746=t,c3747=t,c3748=t,c3749=t,c3750=t,c3751=t,c3752=t,c3753=t,c3754=t,c3755=t,c3756=t,c3757=t,c3758=t,c3759=t,c3760=t,c3761=t,c3762=t,c3763=t,c3764=t,c3765=t,c3766=t,c3767=t,c3768=t,c3769=t,c3770=t,c3771=t,c3772=t,c3773=t,c3774=t,c3775=t,c3776=t,c3777=t,c3778=t,c3779=t,c3780=t,c3781=t,c3782=t,c3783=t,c3784=t,c3785=t,c3786=t,c3787=t,c3788=t,c3789=t,c3790=t,c3791=t,c3792=t,c3793=t,c3794=t,c3795=t,c3796=t,c3797=t,c3798=t,c3799=t,c3800=t,c3801=t,c3802=t,c3803=t,c3804=t,c3805=t,c3806=t,c3807=t," + "c3808=t,c3809=t,c3810=t,c3811=t,c3812=t,c3813=t,c3814=t,c3815=t,c3816=t,c3817=t,c3818=t,c3819=t,c3820=t,c3821=t,c3822=t,c3823=t,c3824=t,c3825=t,c3826=t,c3827=t,c3828=t,c3829=t,c3830=t,c3831=t,c3832=t,c3833=t,c3834=t,c3835=t,c3836=t,c3837=t,c3838=t,c3839=t,c3840=t,c3841=t,c3842=t,c3843=t,c3844=t,c3845=t,c3846=t,c3847=t,c3848=t,c3849=t,c3850=t,c3851=t,c3852=t,c3853=t,c3854=t,c3855=t,c3856=t,c3857=t,c3858=t,c3859=t,c3860=t,c3861=t,c3862=t,c3863=t,c3864=t,c3865=t,c3866=t,c3867=t,c3868=t,c3869=t,c3870=t,c3871=t,c3872=t,c3873=t,c3874=t,c3875=t,c3876=t,c3877=t,c3878=t,c3879=t,c3880=t,c3881=t,c3882=t,c3883=t,c3884=t,c3885=t,c3886=t,c3887=t,c3888=t,c3889=t,c3890=t,c3891=t,c3892=t,c3893=t,c3894=t,c3895=t,c3896=t,c3897=t,c3898=t,c3899=t,c3900=t,c3901=t,c3902=t,c3903=t,c3904=t,c3905=t,c3906=t,c3907=t,c3908=t,c3909=t,c3910=t,c3911=t,c3912=t,c3913=t,c3914=t,c3915=t,c3916=t,c3917=t,c3918=t,c3919=t,c3920=t,c3921=t,c3922=t,c3923=t,c3924=t,c3925=t,c3926=t,c3927=t,c3928=t,c3929=t,c3930=t,c3931=t,c3932=t,c3933=t,c3934=t,c3935=t,c3936=t,c3937=t,c3938=t,c3939=t,c3940=t,c3941=t,c3942=t,c3943=t,c3944=t,c3945=t,c3946=t,c3947=t,c3948=t,c3949=t,c3950=t,c3951=t,c3952=t,c3953=t,c3954=t," + "c3955=t,c3956=t,c3957=t,c3958=t,c3959=t,c3960=t,c3961=t,c3962=t,c3963=t,c3964=t,c3965=t,c3966=t,c3967=t,c3968=t,c3969=t,c3970=t,c3971=t,c3972=t,c3973=t,c3974=t,c3975=t,c3976=t,c3977=t,c3978=t,c3979=t,c3980=t,c3981=t,c3982=t,c3983=t,c3984=t,c3985=t,c3986=t,c3987=t,c3988=t,c3989=t,c3990=t,c3991=t,c3992=t,c3993=t,c3994=t,c3995=t,c3996=t,c3997=t,c3998=t,c3999=t,c4000=t,c4001=t,c4002=t,c4003=t,c4004=t,c4005=t,c4006=t,c4007=t,c4008=t,c4009=t,c4010=t,c4011=t,c4012=t,c4013=t,c4014=t,c4015=t,c4016=t,c4017=t,c4018=t,c4019=t,c4020=t,c4021=t,c4022=t,c4023=t,c4024=t,c4025=t,c4026=t,c4027=t,c4028=t,c4029=t,c4030=t,c4031=t,c4032=t,c4033=t,c4034=t,c4035=t,c4036=t,c4037=t,c4038=t,c4039=t,c4040=t,c4041=t,c4042=t,c4043=t,c4044=t,c4045=t,c4046=t,c4047=t,c4048=t,c4049=t,c4050=t,c4051=t,c4052=t,c4053=t,c4054=t,c4055=t,c4056=t,c4057=t,c4058=t,c4059=t,c4060=t,c4061=t,c4062=t,c4063=t,c4064=t,c4065=t,c4066=t,c4067=t,c4068=t,c4069=t,c4070=t,c4071=t,c4072=t,c4073=t,c4074=t,c4075=t,c4076=t,c4077=t,c4078=t,c4079=t,c4080=t,c4081=t,c4082=t,c4083=t,c4084=t,c4085=t,c4086=t,c4087=t,c4088=t,c4089=t,c4090=t,c4091=t,c4092=t,c4093=t 1626006833640000000" + }; + + int ret = TSDB_CODE_SUCCESS; + for(int i = 0; i < sizeof(sql)/sizeof(sql[0]); i++){ + ret = smlParseInfluxLine(info, sql[i]); + if(ret != TSDB_CODE_SUCCESS) break; + } + ASSERT_NE(ret, 0); + smlDestroyInfo(info); +} diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt index 1c11ee708569f44dbaa797bf58db20b62849e0fc..9c6d941172b7ec58737a33f1515da20f122f0c01 100644 --- a/source/common/CMakeLists.txt +++ b/source/common/CMakeLists.txt @@ -9,6 +9,11 @@ IF (TD_GRANT) ADD_DEFINITIONS(-D_GRANT) ENDIF () +IF (TD_STORAGE) + ADD_DEFINITIONS(-D_STORAGE) + TARGET_LINK_LIBRARIES(common PRIVATE storage) +ENDIF () + target_include_directories( common PUBLIC "${TD_SOURCE_DIR}/include/common" diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 6dddcc2f7422aaa09d4e8b7691cce0c2fc107b6d..dffef21ac49502534e0b42deaf9359c8562d0997 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -15,342 +15,347 @@ #include "systable.h" #include "taos.h" +#include "taosdef.h" #include "tdef.h" #include "tgrant.h" +#include "tmsg.h" #include "types.h" #define SYSTABLE_SCH_TABLE_NAME_LEN ((TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE) #define SYSTABLE_SCH_DB_NAME_LEN ((TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE) #define SYSTABLE_SCH_COL_NAME_LEN ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE) +// clang-format off static const SSysDbTableSchema dnodesSchema[] = { - {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT}, - {.name = "support_vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT}, - {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "note", .bytes = 256 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true}, + {.name = "support_vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true}, + {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true}, + {.name = "note", .bytes = 256 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, }; static const SSysDbTableSchema mnodesSchema[] = { - {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "status", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "status", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true}, }; static const SSysDbTableSchema modulesSchema[] = { - {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "endpoint", .bytes = 134 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "module", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "endpoint", .bytes = 134 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "module", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, }; static const SSysDbTableSchema qnodesSchema[] = { - {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true}, }; static const SSysDbTableSchema snodesSchema[] = { - {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true}, }; static const SSysDbTableSchema bnodesSchema[] = { - {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true}, }; static const SSysDbTableSchema clusterSchema[] = { - {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true}, + {.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "uptime", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true}, }; static const SSysDbTableSchema userDBSchema[] = { - {.name = "name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "vgroups", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, - {.name = "strict", .bytes = TSDB_DB_STRICT_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "duration", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "keep", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "buffer", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "pages", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "minrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "maxrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, - {.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "retention", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL}, - {.name = "cachemodel", .bytes = TSDB_CACHE_MODEL_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "cachesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "wal_level", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, - {.name = "wal_fsync_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "wal_retention_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "wal_roll_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "wal_segment_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, + {.name = "name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "vgroups", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true}, + {.name = "strict", .bytes = TSDB_DB_STRICT_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "duration", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "keep", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "buffer", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "pages", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "minrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "maxrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true}, + {.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "retentions", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = true}, + {.name = "cachemodel", .bytes = TSDB_CACHE_MODEL_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "cachesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "wal_level", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true}, + {.name = "wal_fsync_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "wal_retention_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true}, + {.name = "wal_roll_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "wal_segment_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true}, }; static const SSysDbTableSchema userFuncSchema[] = { - {.name = "name", .bytes = TSDB_FUNC_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "comment", .bytes = PATH_MAX - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "aggregate", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "output_type", .bytes = TSDB_TYPE_STR_MAX_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "code_len", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "bufsize", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, + {.name = "name", .bytes = TSDB_FUNC_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "comment", .bytes = PATH_MAX - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "aggregate", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "output_type", .bytes = TSDB_TYPE_STR_MAX_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "code_len", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "bufsize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, }; static const SSysDbTableSchema userIdxSchema[] = { - {.name = "index_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "index_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, }; static const SSysDbTableSchema userStbsSchema[] = { - {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "tags", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "last_update", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "watermark", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "max_delay", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "rollup", .bytes = 128 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "tags", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "last_update", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "watermark", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "max_delay", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "rollup", .bytes = 128 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; static const SSysDbTableSchema streamSchema[] = { - {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "trigger", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "trigger", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; static const SSysDbTableSchema userTblsSchema[] = { - {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "uid", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "ttl", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "type", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "uid", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "ttl", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "type", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; static const SSysDbTableSchema userTagsSchema[] = { - {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "tag_name", .bytes = TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "tag_type", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "tag_value", .bytes = TSDB_MAX_TAGS_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "tag_name", .bytes = TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "tag_type", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "tag_value", .bytes = TSDB_MAX_TAGS_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; static const SSysDbTableSchema userTblDistSchema[] = { - {.name = "db_name", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "table_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "distributed_histogram", .bytes = 500 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "min_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "max_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "avg_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "stddev_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "blocks", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "storage_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "compression_ratio", .bytes = 8, .type = TSDB_DATA_TYPE_DOUBLE}, - {.name = "rows_in_mem", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "seek_header_time", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, + {.name = "db_name", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "table_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "distributed_histogram", .bytes = 500 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "min_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "max_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "avg_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "stddev_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true}, + {.name = "blocks", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "storage_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true}, + {.name = "compression_ratio", .bytes = 8, .type = TSDB_DATA_TYPE_DOUBLE, .sysInfo = true}, + {.name = "rows_in_mem", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "seek_header_time", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, }; static const SSysDbTableSchema userUsersSchema[] = { - {.name = "name", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "super", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, - {.name = "enable", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, - {.name = "sysinfo", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "name", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "super", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = false}, + {.name = "enable", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = false}, + {.name = "sysinfo", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, }; GRANTS_SCHEMA; static const SSysDbTableSchema vgroupsSchema[] = { - {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "tables", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "v1_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "v1_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "v2_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "v2_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "v3_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "v3_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "tsma", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, + {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "tables", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "v1_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "v1_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "v2_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "v2_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "v3_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "v3_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "tsma", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true}, }; static const SSysDbTableSchema smaSchema[] = { - {.name = "sma_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, + {.name = "sma_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, }; static const SSysDbTableSchema transSchema[] = { - {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "db1", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "db2", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "last_action_info", - .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE, - .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "db1", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "db2", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "last_action_info", .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, }; static const SSysDbTableSchema configSchema[] = { - {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, }; static const SSysDbTableSchema variablesSchema[] = { {.name = "dnode_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, +}; + +static const SSysDbTableSchema topicSchema[] = { + {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + // TODO config +}; + + +static const SSysDbTableSchema subscriptionSchema[] = { + {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, }; static const SSysTableMeta infosMeta[] = { - {TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema)}, - {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema)}, - {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema)}, - {TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema)}, + {TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true}, + {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true}, + {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true}, + {TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema), true}, // {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)}, // {TSDB_INS_TABLE_BNODES, bnodesSchema, tListLen(bnodesSchema)}, - {TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema)}, - {TSDB_INS_TABLE_DATABASES, userDBSchema, tListLen(userDBSchema)}, - {TSDB_INS_TABLE_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema)}, - {TSDB_INS_TABLE_INDEXES, userIdxSchema, tListLen(userIdxSchema)}, - {TSDB_INS_TABLE_STABLES, userStbsSchema, tListLen(userStbsSchema)}, - {TSDB_INS_TABLE_TABLES, userTblsSchema, tListLen(userTblsSchema)}, - {TSDB_INS_TABLE_TAGS, userTagsSchema, tListLen(userTagsSchema)}, + {TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema), true}, + {TSDB_INS_TABLE_DATABASES, userDBSchema, tListLen(userDBSchema), false}, + {TSDB_INS_TABLE_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema), false}, + {TSDB_INS_TABLE_INDEXES, userIdxSchema, tListLen(userIdxSchema), false}, + {TSDB_INS_TABLE_STABLES, userStbsSchema, tListLen(userStbsSchema), false}, + {TSDB_INS_TABLE_TABLES, userTblsSchema, tListLen(userTblsSchema), false}, + {TSDB_INS_TABLE_TAGS, userTagsSchema, tListLen(userTagsSchema), false}, // {TSDB_INS_TABLE_TABLE_DISTRIBUTED, userTblDistSchema, tListLen(userTblDistSchema)}, - {TSDB_INS_TABLE_USERS, userUsersSchema, tListLen(userUsersSchema)}, - {TSDB_INS_TABLE_LICENCES, grantsSchema, tListLen(grantsSchema)}, - {TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema)}, - {TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema)}, - {TSDB_INS_TABLE_DNODE_VARIABLES, variablesSchema, tListLen(variablesSchema)}, + {TSDB_INS_TABLE_USERS, userUsersSchema, tListLen(userUsersSchema), false}, + {TSDB_INS_TABLE_LICENCES, grantsSchema, tListLen(grantsSchema), true}, + {TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema), true}, + {TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema), true}, + {TSDB_INS_TABLE_DNODE_VARIABLES, variablesSchema, tListLen(variablesSchema), true}, + {TSDB_INS_TABLE_TOPICS, topicSchema, tListLen(topicSchema), false}, + {TSDB_INS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema), false}, + {TSDB_INS_TABLE_STREAMS, streamSchema, tListLen(streamSchema), false}, }; static const SSysDbTableSchema connectionsSchema[] = { - {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT}, - {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_UINT}, - {.name = "end_point", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "login_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT, .sysInfo = false}, + {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_UINT, .sysInfo = false}, + {.name = "end_point", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "login_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, }; -static const SSysDbTableSchema topicSchema[] = { - {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - // TODO config -}; static const SSysDbTableSchema consumerSchema[] = { - {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "consumer_group", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "client_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "topics", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - /*{.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},*/ - {.name = "up_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "subscribe_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, -}; - -static const SSysDbTableSchema subscriptionSchema[] = { - {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, + {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "consumer_group", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "client_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "topics", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + /*{.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},*/ + {.name = "up_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "subscribe_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, }; static const SSysDbTableSchema offsetSchema[] = { - {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "committed_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "current_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "skip_log_cnt", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, + {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "committed_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "current_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "skip_log_cnt", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, }; static const SSysDbTableSchema querySchema[] = { - {.name = "kill_id", .bytes = TSDB_QUERY_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "query_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT}, - {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "exec_usec", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, - {.name = "stable_query", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL}, - {.name = "sub_num", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "sub_status", .bytes = TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "kill_id", .bytes = TSDB_QUERY_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "query_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT, .sysInfo = false}, + {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "exec_usec", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "stable_query", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = false}, + {.name = "sub_num", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "sub_status", .bytes = TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; static const SSysDbTableSchema appSchema[] = { - {.name = "app_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "ip", .bytes = TSDB_IPv4ADDR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "name", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "start_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "insert_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "insert_row", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "insert_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "insert_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "fetch_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "query_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "slow_query", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "total_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "current_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT}, - {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, + {.name = "app_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "ip", .bytes = TSDB_IPv4ADDR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "name", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "start_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, + {.name = "insert_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "insert_row", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "insert_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "insert_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "fetch_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "query_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "slow_query", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "total_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "current_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false}, + {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, }; static const SSysTableMeta perfsMeta[] = { - {TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema)}, - {TSDB_PERFS_TABLE_QUERIES, querySchema, tListLen(querySchema)}, - {TSDB_PERFS_TABLE_TOPICS, topicSchema, tListLen(topicSchema)}, - {TSDB_PERFS_TABLE_CONSUMERS, consumerSchema, tListLen(consumerSchema)}, - {TSDB_PERFS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema)}, + {TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema), false}, + {TSDB_PERFS_TABLE_QUERIES, querySchema, tListLen(querySchema), false}, + {TSDB_PERFS_TABLE_CONSUMERS, consumerSchema, tListLen(consumerSchema), false}, // {TSDB_PERFS_TABLE_OFFSETS, offsetSchema, tListLen(offsetSchema)}, - {TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema)}, - {TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema)}, - {TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)}, - {TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema)}}; + {TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema), false}, + // {TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema), false}, + {TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema), false}}; +// clang-format on void getInfosDbMeta(const SSysTableMeta** pInfosTableMeta, size_t* size) { if (pInfosTableMeta) { @@ -369,3 +374,26 @@ void getPerfDbMeta(const SSysTableMeta** pPerfsTableMeta, size_t* size) { *size = tListLen(perfsMeta); } } + +void getVisibleInfosTablesNum(bool sysInfo, size_t* size) { + if (sysInfo) { + getInfosDbMeta(NULL, size); + return; + } + *size = 0; + const SSysTableMeta* pMeta = NULL; + size_t totalNum = 0; + getInfosDbMeta(&pMeta, &totalNum); + for (size_t i = 0; i < totalNum; ++i) { + if (!pMeta[i].sysInfo) { + ++(*size); + } + } +} + +bool invisibleColumn(bool sysInfo, int8_t tableType, int8_t flags) { + if (sysInfo || TSDB_SYSTEM_TABLE != tableType) { + return false; + } + return 0 != (flags & COL_IS_SYSINFO); +} diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 84896277211ee06f941793beadeed89d60d0f10f..c7f372f17b3c174290396c0e0ca49229ff8df73b 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1228,6 +1228,7 @@ void blockDataFreeRes(SSDataBlock* pBlock) { } taosArrayDestroy(pBlock->pDataBlock); + pBlock->pDataBlock = NULL; taosMemoryFreeClear(pBlock->pBlockAgg); memset(&pBlock->info, 0, sizeof(SDataBlockInfo)); } @@ -1343,12 +1344,14 @@ SSDataBlock* createDataBlock() { SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock)); if (pBlock == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; } pBlock->pDataBlock = taosArrayInit(4, sizeof(SColumnInfoData)); if (pBlock->pDataBlock == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; taosMemoryFree(pBlock); + return NULL; } return pBlock; @@ -1423,6 +1426,7 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) { } void colDataDestroy(SColumnInfoData* pColData) { + if(!pColData) return; if (IS_VAR_DATA_TYPE(pColData->info.type)) { taosMemoryFreeClear(pColData->varmeta.offset); } else { @@ -1703,8 +1707,8 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { } void blockDebugShowDataBlock(SSDataBlock* pBlock, const char* flag) { - SArray* dataBlocks = taosArrayInit(1, sizeof(SSDataBlock)); - taosArrayPush(dataBlocks, pBlock); + SArray* dataBlocks = taosArrayInit(1, sizeof(SSDataBlock*)); + taosArrayPush(dataBlocks, &pBlock); blockDebugShowDataBlocks(dataBlocks, flag); taosArrayDestroy(dataBlocks); } diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c index 8eeab77a157993bd8d89479b221982d3b1e5c336..b40f449a0550140784250b9c2250d191552e4652 100644 --- a/source/common/src/tdataformat.c +++ b/source/common/src/tdataformat.c @@ -1064,6 +1064,26 @@ _err: return code; } +void tTagSetCid(const STag *pTag, int16_t iTag, int16_t cid) { + uint8_t *p = NULL; + int8_t isLarge = pTag->flags & TD_TAG_LARGE; + int16_t offset = 0; + + if (isLarge) { + p = (uint8_t *)&((int16_t *)pTag->idx)[pTag->nTag]; + } else { + p = (uint8_t *)&pTag->idx[pTag->nTag]; + } + + if (isLarge) { + offset = ((int16_t *)pTag->idx)[iTag]; + } else { + offset = pTag->idx[iTag]; + } + + tPutI16v(p + offset, cid); +} + #if 1 // =================================================================================================================== int tdInitTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version) { if (pBuilder == NULL) return -1; diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 8823e63db4ab569c0dd55eb6488103897826a060..0bab6a8611b86111884c4e5fb2ca16cee0eef4e7 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -61,6 +61,7 @@ int32_t tsNumOfVnodeStreamThreads = 2; int32_t tsNumOfVnodeFetchThreads = 4; int32_t tsNumOfVnodeWriteThreads = 2; int32_t tsNumOfVnodeSyncThreads = 2; +int32_t tsNumOfVnodeRsmaThreads = 2; int32_t tsNumOfQnodeQueryThreads = 4; int32_t tsNumOfQnodeFetchThreads = 4; int32_t tsNumOfSnodeSharedThreads = 2; @@ -75,8 +76,8 @@ int32_t tsMonitorMaxLogs = 100; bool tsMonitorComp = false; // telem -bool tsEnableTelem = false; -int32_t tsTelemInterval = 86400; +bool tsEnableTelem = true; +int32_t tsTelemInterval = 43200; char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.taosdata.com"; uint16_t tsTelemPort = 80; @@ -164,59 +165,28 @@ int32_t tsMqRebalanceInterval = 2; int32_t tsTtlUnit = 86400; int32_t tsTtlPushInterval = 86400; int32_t tsGrantHBInterval = 60; +int32_t tsUptimeInterval = 300; // seconds -void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary) { - tstrncpy(tsDiskCfg[index].dir, v1, TSDB_FILENAME_LEN); - tsDiskCfg[index].level = level; - tsDiskCfg[index].primary = primary; - uTrace("dataDir:%s, level:%d primary:%d is configured", v1, level, primary); -} - -static int32_t taosSetTfsCfg(SConfig *pCfg) { +#ifndef _STORAGE +int32_t taosSetTfsCfg(SConfig *pCfg) { SConfigItem *pItem = cfgGetItem(pCfg, "dataDir"); memset(tsDataDir, 0, PATH_MAX); int32_t size = taosArrayGetSize(pItem->array); - if (size <= 0) { - tsDiskCfgNum = 1; - taosAddDataDir(0, pItem->str, 0, 1); - tstrncpy(tsDataDir, pItem->str, PATH_MAX); - if (taosMulMkDir(tsDataDir) != 0) { - uError("failed to create dataDir:%s since %s", tsDataDir, terrstr()); - return -1; - } - } else { - tsDiskCfgNum = size < TFS_MAX_DISKS ? size : TFS_MAX_DISKS; - for (int32_t index = 0; index < tsDiskCfgNum; ++index) { - SDiskCfg *pCfg = taosArrayGet(pItem->array, index); - memcpy(&tsDiskCfg[index], pCfg, sizeof(SDiskCfg)); - if (pCfg->level == 0 && pCfg->primary == 1) { - tstrncpy(tsDataDir, pCfg->dir, PATH_MAX); - } - if (taosMulMkDir(pCfg->dir) != 0) { - uError("failed to create tfsDir:%s since %s", tsDataDir, terrstr()); - return -1; - } - } - } - - if (tsDataDir[0] == 0) { - if (pItem->str != NULL) { - taosAddDataDir(tsDiskCfgNum, pItem->str, 0, 1); - tstrncpy(tsDataDir, pItem->str, PATH_MAX); - if (taosMulMkDir(tsDataDir) != 0) { - uError("failed to create tfsDir:%s since %s", tsDataDir, terrstr()); - return -1; - } - tsDiskCfgNum++; - } else { - uError("datadir not set"); - return -1; - } + tsDiskCfgNum = 1; + tstrncpy(tsDiskCfg[0].dir, pItem->str, TSDB_FILENAME_LEN); + tsDiskCfg[0].level = 0; + tsDiskCfg[0].primary = 1; + tstrncpy(tsDataDir, pItem->str, PATH_MAX); + if (taosMulMkDir(tsDataDir) != 0) { + uError("failed to create dataDir:%s", tsDataDir); + return -1; } - return 0; } +#else +int32_t taosSetTfsCfg(SConfig *pCfg); +#endif struct SConfig *taosGetCfg() { return tsCfg; @@ -409,6 +379,10 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16); if (cfgAddInt32(pCfg, "numOfVnodeSyncThreads", tsNumOfVnodeSyncThreads, 1, 1024, 0) != 0) return -1; + tsNumOfVnodeRsmaThreads = tsNumOfCores; + tsNumOfVnodeRsmaThreads = TMAX(tsNumOfVnodeRsmaThreads, 4); + if (cfgAddInt32(pCfg, "numOfVnodeRsmaThreads", tsNumOfVnodeRsmaThreads, 1, 1024, 0) != 0) return -1; + tsNumOfQnodeQueryThreads = tsNumOfCores * 2; tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4); if (cfgAddInt32(pCfg, "numOfQnodeQueryThreads", tsNumOfQnodeQueryThreads, 1, 1024, 0) != 0) return -1; @@ -446,12 +420,159 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1; if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, 1) != 0) return -1; if (cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushInterval, 1, 100000, 1) != 0) return -1; + if (cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, 1) != 0) return -1; if (cfgAddBool(pCfg, "udf", tsStartUdfd, 0) != 0) return -1; GRANT_CFG_ADD; return 0; } +static int32_t taosUpdateServerCfg(SConfig *pCfg) { + SConfigItem *pItem; + ECfgSrcType stype; + int32_t numOfCores; + int64_t totalMemoryKB; + + pItem = cfgGetItem(tsCfg, "numOfCores"); + if (pItem == NULL) { + return -1; + } else { + stype = pItem->stype; + numOfCores = pItem->fval; + } + + pItem = cfgGetItem(tsCfg, "supportVnodes"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsNumOfSupportVnodes = numOfCores * 2; + tsNumOfSupportVnodes = TMAX(tsNumOfSupportVnodes, 2); + pItem->i32 = tsNumOfSupportVnodes; + pItem->stype = stype; + } + + pItem = cfgGetItem(tsCfg, "numOfRpcThreads"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsNumOfRpcThreads = numOfCores / 2; + tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, 4); + pItem->i32 = tsNumOfRpcThreads; + pItem->stype = stype; + } + + pItem = cfgGetItem(tsCfg, "numOfCommitThreads"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsNumOfCommitThreads = numOfCores / 2; + tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4); + pItem->i32 = tsNumOfCommitThreads; + pItem->stype = stype; + } + + pItem = cfgGetItem(tsCfg, "numOfMnodeReadThreads"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsNumOfMnodeReadThreads = numOfCores / 8; + tsNumOfMnodeReadThreads = TRANGE(tsNumOfMnodeReadThreads, 1, 4); + pItem->i32 = tsNumOfMnodeReadThreads; + pItem->stype = stype; + } + + pItem = cfgGetItem(tsCfg, "numOfVnodeQueryThreads"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsNumOfVnodeQueryThreads = numOfCores * 2; + tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4); + pItem->i32 = tsNumOfVnodeQueryThreads; + pItem->stype = stype; + } + + pItem = cfgGetItem(tsCfg, "numOfVnodeStreamThreads"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsNumOfVnodeStreamThreads = numOfCores / 4; + tsNumOfVnodeStreamThreads = TMAX(tsNumOfVnodeStreamThreads, 4); + pItem->i32 = tsNumOfVnodeStreamThreads; + pItem->stype = stype; + } + + pItem = cfgGetItem(tsCfg, "numOfVnodeFetchThreads"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsNumOfVnodeFetchThreads = numOfCores / 4; + tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4); + pItem->i32 = tsNumOfVnodeFetchThreads; + pItem->stype = stype; + } + + pItem = cfgGetItem(tsCfg, "numOfVnodeWriteThreads"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsNumOfVnodeWriteThreads = numOfCores; + tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1); + pItem->i32 = tsNumOfVnodeWriteThreads; + pItem->stype = stype; + } + + pItem = cfgGetItem(tsCfg, "numOfVnodeSyncThreads"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsNumOfVnodeSyncThreads = numOfCores * 2; + tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16); + pItem->i32 = tsNumOfVnodeSyncThreads; + pItem->stype = stype; + } + + pItem = cfgGetItem(tsCfg, "numOfVnodeRsmaThreads"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsNumOfVnodeRsmaThreads = numOfCores; + tsNumOfVnodeRsmaThreads = TMAX(tsNumOfVnodeRsmaThreads, 4); + pItem->i32 = tsNumOfVnodeRsmaThreads; + pItem->stype = stype; + } + + pItem = cfgGetItem(tsCfg, "numOfQnodeQueryThreads"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsNumOfQnodeQueryThreads = numOfCores * 2; + tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4); + pItem->i32 = tsNumOfQnodeQueryThreads; + pItem->stype = stype; + } + + pItem = cfgGetItem(tsCfg, "numOfQnodeFetchThreads"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsNumOfQnodeFetchThreads = numOfCores / 2; + tsNumOfQnodeFetchThreads = TMAX(tsNumOfQnodeFetchThreads, 4); + pItem->i32 = tsNumOfQnodeFetchThreads; + pItem->stype = stype; + } + + pItem = cfgGetItem(tsCfg, "numOfSnodeSharedThreads"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsNumOfSnodeSharedThreads = numOfCores / 4; + tsNumOfSnodeSharedThreads = TRANGE(tsNumOfSnodeSharedThreads, 2, 4); + pItem->i32 = tsNumOfSnodeSharedThreads; + pItem->stype = stype; + } + + pItem = cfgGetItem(tsCfg, "numOfSnodeUniqueThreads"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsNumOfSnodeUniqueThreads = numOfCores / 4; + tsNumOfSnodeUniqueThreads = TRANGE(tsNumOfSnodeUniqueThreads, 2, 4); + pItem->i32 = tsNumOfSnodeUniqueThreads; + pItem->stype = stype; + } + + pItem = cfgGetItem(tsCfg, "totalMemoryKB"); + if (pItem == NULL) { + return -1; + } else { + stype = pItem->stype; + totalMemoryKB = pItem->i64; + } + + pItem = cfgGetItem(tsCfg, "rpcQueueMemoryAllowed"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsRpcQueueMemoryAllowed = totalMemoryKB * 1024 * 0.1; + tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL); + pItem->i64 = tsRpcQueueMemoryAllowed; + pItem->stype = stype; + } + + return 0; +} + + static void taosSetClientLogCfg(SConfig *pCfg) { SConfigItem *pItem = cfgGetItem(pCfg, "logDir"); tstrncpy(tsLogDir, cfgGetItem(pCfg, "logDir")->str, PATH_MAX); @@ -570,6 +691,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsNumOfVnodeFetchThreads = cfgGetItem(pCfg, "numOfVnodeFetchThreads")->i32; tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32; tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32; + tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32; tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32; tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32; tsNumOfSnodeSharedThreads = cfgGetItem(pCfg, "numOfSnodeSharedThreads")->i32; @@ -592,6 +714,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32; tsTtlUnit = cfgGetItem(pCfg, "ttlUnit")->i32; tsTtlPushInterval = cfgGetItem(pCfg, "ttlPushInterval")->i32; + tsUptimeInterval = cfgGetItem(pCfg, "uptimeInterval")->i32; tsStartUdfd = cfgGetItem(pCfg, "udf")->bval; @@ -814,6 +937,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) { tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32; } else if (strcasecmp("numOfVnodeSyncThreads", name) == 0) { tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32; + } else if (strcasecmp("numOfVnodeRsmaThreads", name) == 0) { + tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32; } else if (strcasecmp("numOfQnodeQueryThreads", name) == 0) { tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32; } else if (strcasecmp("numOfQnodeFetchThreads", name) == 0) { @@ -1002,7 +1127,7 @@ int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDi taosSetServerLogCfg(pCfg); } - taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32); + taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32, false); if (taosMulMkDir(tsLogDir) != 0) { uError("failed to create dir:%s since %s", tsLogDir, terrstr()); @@ -1069,6 +1194,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile if (taosSetClientCfg(tsCfg)) return -1; } else { if (taosSetClientCfg(tsCfg)) return -1; + if (taosUpdateServerCfg(tsCfg)) return -1; if (taosSetServerCfg(tsCfg)) return -1; if (taosSetTfsCfg(tsCfg) != 0) return -1; } @@ -1093,7 +1219,7 @@ void taosCleanupCfg() { void taosCfgDynamicOptions(const char *option, const char *value) { if (strncasecmp(option, "debugFlag", 9) == 0) { int32_t flag = atoi(value); - taosSetAllDebugFlag(flag); + taosSetAllDebugFlag(flag, true); return; } @@ -1118,11 +1244,13 @@ void taosCfgDynamicOptions(const char *option, const char *value) { "dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag", "tqDebugFlag", "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag", "uDebugFlag", "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag", + "jniDebugFlag", }; int32_t *optionVars[] = { &dDebugFlag, &vDebugFlag, &mDebugFlag, &wDebugFlag, &sDebugFlag, &tsdbDebugFlag, &tqDebugFlag, &fsDebugFlag, &udfDebugFlag, &smaDebugFlag, &idxDebugFlag, &tdbDebugFlag, &tmrDebugFlag, &uDebugFlag, &smaDebugFlag, &rpcDebugFlag, &qDebugFlag, &metaDebugFlag, + &jniDebugFlag, }; int32_t optionSize = tListLen(options); @@ -1134,41 +1262,42 @@ void taosCfgDynamicOptions(const char *option, const char *value) { int32_t flag = atoi(value); uInfo("%s set from %d to %d", optName, *optionVars[d], flag); *optionVars[d] = flag; - taosSetDebugFlag(optionVars[d], optName, flag); + taosSetDebugFlag(optionVars[d], optName, flag, true); return; } uError("failed to cfg dynamic option:%s value:%s", option, value); } -void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal) { +void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite) { SConfigItem *pItem = cfgGetItem(tsCfg, flagName); - if (pItem != NULL) { + if (pItem != NULL && (rewrite || pItem->i32 == 0)) { pItem->i32 = flagVal; } *pFlagPtr = flagVal; } -void taosSetAllDebugFlag(int32_t flag) { +void taosSetAllDebugFlag(int32_t flag, bool rewrite) { if (flag <= 0) return; - taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag); - taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag); - taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag); - taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag); - taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag); - taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag); - taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag); - taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag); - taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag); - taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag); - taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag); - taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag); - taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag); - taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag); - taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag); - taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag); - taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag); - taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag); + taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag, rewrite); + taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag, rewrite); + taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag, rewrite); + taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag, rewrite); + taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag, rewrite); + taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag, rewrite); + taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag, rewrite); + taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag, rewrite); + taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag, rewrite); + taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag, rewrite); + taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag, rewrite); + taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag, rewrite); + taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag, rewrite); + taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag, rewrite); + taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag, rewrite); + taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag, rewrite); + taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag, rewrite); + taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, rewrite); + taosSetDebugFlag(&metaDebugFlag, "tmrDebugFlag", flag, rewrite); uInfo("all debug flag are set to %d", flag); } diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 7dd3ce34c3d52495217e27663dd2c7c044c77c4d..af29ab7c50bc1ed7b3746f36f1ea44307cf13694 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -3196,12 +3196,16 @@ static int32_t tDecodeSTableMetaRsp(SDecoder *pDecoder, STableMetaRsp *pRsp) { if (tDecodeI32(pDecoder, &pRsp->vgId) < 0) return -1; int32_t totalCols = pRsp->numOfTags + pRsp->numOfColumns; - pRsp->pSchemas = taosMemoryMalloc(sizeof(SSchema) * totalCols); - if (pRsp->pSchemas == NULL) return -1; + if (totalCols > 0) { + pRsp->pSchemas = taosMemoryMalloc(sizeof(SSchema) * totalCols); + if (pRsp->pSchemas == NULL) return -1; - for (int32_t i = 0; i < totalCols; ++i) { - SSchema *pSchema = &pRsp->pSchemas[i]; - if (tDecodeSSchema(pDecoder, pSchema) < 0) return -1; + for (int32_t i = 0; i < totalCols; ++i) { + SSchema *pSchema = &pRsp->pSchemas[i]; + if (tDecodeSSchema(pDecoder, pSchema) < 0) return -1; + } + } else { + pRsp->pSchemas = NULL; } return 0; @@ -3326,7 +3330,7 @@ int32_t tDeserializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) { return 0; } -void tFreeSTableMetaRsp(STableMetaRsp *pRsp) { taosMemoryFreeClear(pRsp->pSchemas); } +void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp *)pRsp)->pSchemas); } void tFreeSTableIndexRsp(void *info) { if (NULL == info) { @@ -3630,6 +3634,7 @@ int32_t tSerializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) { if (tEncodeU32(&encoder, pRsp->connId) < 0) return -1; if (tEncodeI32(&encoder, pRsp->dnodeNum) < 0) return -1; if (tEncodeI8(&encoder, pRsp->superUser) < 0) return -1; + if (tEncodeI8(&encoder, pRsp->sysInfo) < 0) return -1; if (tEncodeI8(&encoder, pRsp->connType) < 0) return -1; if (tEncodeSEpSet(&encoder, &pRsp->epSet) < 0) return -1; if (tEncodeI32(&encoder, pRsp->svrTimestamp) < 0) return -1; @@ -3652,6 +3657,7 @@ int32_t tDeserializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) { if (tDecodeU32(&decoder, &pRsp->connId) < 0) return -1; if (tDecodeI32(&decoder, &pRsp->dnodeNum) < 0) return -1; if (tDecodeI8(&decoder, &pRsp->superUser) < 0) return -1; + if (tDecodeI8(&decoder, &pRsp->sysInfo) < 0) return -1; if (tDecodeI8(&decoder, &pRsp->connType) < 0) return -1; if (tDecodeSEpSet(&decoder, &pRsp->epSet) < 0) return -1; if (tDecodeI32(&decoder, &pRsp->svrTimestamp) < 0) return -1; @@ -4262,7 +4268,6 @@ int32_t tDeserializeSServerStatusRsp(void *buf, int32_t bufLen, SServerStatusRsp tDecoderClear(&decoder); return 0; } - int32_t tEncodeSMqOffset(SEncoder *encoder, const SMqOffset *pOffset) { if (tEncodeI32(encoder, pOffset->vgId) < 0) return -1; if (tEncodeI64(encoder, pOffset->offset) < 0) return -1; @@ -4300,7 +4305,6 @@ int32_t tDecodeSMqCMCommitOffsetReq(SDecoder *decoder, SMqCMCommitOffsetReq *pRe tEndDecode(decoder); return 0; } - int32_t tSerializeSExplainRsp(void *buf, int32_t bufLen, SExplainRsp *pRsp) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -5092,6 +5096,10 @@ int tEncodeSVCreateTbRsp(SEncoder *pCoder, const SVCreateTbRsp *pRsp) { if (tStartEncode(pCoder) < 0) return -1; if (tEncodeI32(pCoder, pRsp->code) < 0) return -1; + if (tEncodeI32(pCoder, pRsp->pMeta ? 1 : 0) < 0) return -1; + if (pRsp->pMeta) { + if (tEncodeSTableMetaRsp(pCoder, pRsp->pMeta) < 0) return -1; + } tEndEncode(pCoder); return 0; @@ -5102,10 +5110,32 @@ int tDecodeSVCreateTbRsp(SDecoder *pCoder, SVCreateTbRsp *pRsp) { if (tDecodeI32(pCoder, &pRsp->code) < 0) return -1; + int32_t meta = 0; + if (tDecodeI32(pCoder, &meta) < 0) return -1; + if (meta) { + pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + if (NULL == pRsp->pMeta) return -1; + if (tDecodeSTableMetaRsp(pCoder, pRsp->pMeta) < 0) return -1; + } else { + pRsp->pMeta = NULL; + } + tEndDecode(pCoder); return 0; } +void tFreeSVCreateTbRsp(void *param) { + if (NULL == param) { + return; + } + + SVCreateTbRsp *pRsp = (SVCreateTbRsp *)param; + if (pRsp->pMeta) { + taosMemoryFree(pRsp->pMeta->pSchemas); + taosMemoryFree(pRsp->pMeta); + } +} + // TDMT_VND_DROP_TABLE ================= static int32_t tEncodeSVDropTbReq(SEncoder *pCoder, const SVDropTbReq *pReq) { if (tStartEncode(pCoder) < 0) return -1; @@ -5294,6 +5324,10 @@ static int32_t tEncodeSSubmitBlkRsp(SEncoder *pEncoder, const SSubmitBlkRsp *pBl if (tEncodeI32v(pEncoder, pBlock->numOfRows) < 0) return -1; if (tEncodeI32v(pEncoder, pBlock->affectedRows) < 0) return -1; if (tEncodeI64v(pEncoder, pBlock->sver) < 0) return -1; + if (tEncodeI32(pEncoder, pBlock->pMeta ? 1 : 0) < 0) return -1; + if (pBlock->pMeta) { + if (tEncodeSTableMetaRsp(pEncoder, pBlock->pMeta) < 0) return -1; + } tEndEncode(pEncoder); return 0; @@ -5312,6 +5346,16 @@ static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) { if (tDecodeI32v(pDecoder, &pBlock->affectedRows) < 0) return -1; if (tDecodeI64v(pDecoder, &pBlock->sver) < 0) return -1; + int32_t meta = 0; + if (tDecodeI32(pDecoder, &meta) < 0) return -1; + if (meta) { + pBlock->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + if (NULL == pBlock->pMeta) return -1; + if (tDecodeSTableMetaRsp(pDecoder, pBlock->pMeta) < 0) return -1; + } else { + pBlock->pMeta = NULL; + } + tEndDecode(pDecoder); return 0; } @@ -5349,6 +5393,20 @@ int32_t tDecodeSSubmitRsp(SDecoder *pDecoder, SSubmitRsp *pRsp) { return 0; } +void tFreeSSubmitBlkRsp(void *param) { + if (NULL == param) { + return; + } + + SSubmitBlkRsp *pRsp = (SSubmitBlkRsp *)param; + + taosMemoryFree(pRsp->tblFName); + if (pRsp->pMeta) { + taosMemoryFree(pRsp->pMeta->pSchemas); + taosMemoryFree(pRsp->pMeta); + } +} + void tFreeSSubmitRsp(SSubmitRsp *pRsp) { if (NULL == pRsp) return; @@ -5560,9 +5618,60 @@ void tFreeSMAlterStbRsp(SMAlterStbRsp *pRsp) { } } +int32_t tEncodeSMCreateStbRsp(SEncoder *pEncoder, const SMCreateStbRsp *pRsp) { + if (tStartEncode(pEncoder) < 0) return -1; + if (tEncodeI32(pEncoder, pRsp->pMeta->pSchemas ? 1 : 0) < 0) return -1; + if (pRsp->pMeta->pSchemas) { + if (tEncodeSTableMetaRsp(pEncoder, pRsp->pMeta) < 0) return -1; + } + tEndEncode(pEncoder); + return 0; +} + +int32_t tDecodeSMCreateStbRsp(SDecoder *pDecoder, SMCreateStbRsp *pRsp) { + int32_t meta = 0; + if (tStartDecode(pDecoder) < 0) return -1; + if (tDecodeI32(pDecoder, &meta) < 0) return -1; + if (meta) { + pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + if (NULL == pRsp->pMeta) return -1; + if (tDecodeSTableMetaRsp(pDecoder, pRsp->pMeta) < 0) return -1; + } + tEndDecode(pDecoder); + return 0; +} + +int32_t tDeserializeSMCreateStbRsp(void *buf, int32_t bufLen, SMCreateStbRsp *pRsp) { + int32_t meta = 0; + SDecoder decoder = {0}; + tDecoderInit(&decoder, buf, bufLen); + + if (tStartDecode(&decoder) < 0) return -1; + if (tDecodeI32(&decoder, &meta) < 0) return -1; + if (meta) { + pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + if (NULL == pRsp->pMeta) return -1; + if (tDecodeSTableMetaRsp(&decoder, pRsp->pMeta) < 0) return -1; + } + tEndDecode(&decoder); + tDecoderClear(&decoder); + return 0; +} + +void tFreeSMCreateStbRsp(SMCreateStbRsp *pRsp) { + if (NULL == pRsp) { + return; + } + + if (pRsp->pMeta) { + taosMemoryFree(pRsp->pMeta->pSchemas); + taosMemoryFree(pRsp->pMeta); + } +} + int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal) { if (tEncodeI8(pEncoder, pOffsetVal->type) < 0) return -1; - if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA) { + if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) { if (tEncodeI64(pEncoder, pOffsetVal->uid) < 0) return -1; if (tEncodeI64(pEncoder, pOffsetVal->ts) < 0) return -1; } else if (pOffsetVal->type == TMQ_OFFSET__LOG) { @@ -5577,7 +5686,7 @@ int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal) int32_t tDecodeSTqOffsetVal(SDecoder *pDecoder, STqOffsetVal *pOffsetVal) { if (tDecodeI8(pDecoder, &pOffsetVal->type) < 0) return -1; - if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA) { + if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) { if (tDecodeI64(pDecoder, &pOffsetVal->uid) < 0) return -1; if (tDecodeI64(pDecoder, &pOffsetVal->ts) < 0) return -1; } else if (pOffsetVal->type == TMQ_OFFSET__LOG) { @@ -5590,7 +5699,6 @@ int32_t tDecodeSTqOffsetVal(SDecoder *pDecoder, STqOffsetVal *pOffsetVal) { return 0; } -#if 1 int32_t tFormatOffset(char *buf, int32_t maxLen, const STqOffsetVal *pVal) { if (pVal->type == TMQ_OFFSET__RESET_NONE) { snprintf(buf, maxLen, "offset(reset to none)"); @@ -5600,16 +5708,13 @@ int32_t tFormatOffset(char *buf, int32_t maxLen, const STqOffsetVal *pVal) { snprintf(buf, maxLen, "offset(reset to latest)"); } else if (pVal->type == TMQ_OFFSET__LOG) { snprintf(buf, maxLen, "offset(log) ver:%" PRId64, pVal->version); - } else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA) { + } else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pVal->type == TMQ_OFFSET__SNAPSHOT_META) { snprintf(buf, maxLen, "offset(ss data) uid:%" PRId64 ", ts:%" PRId64, pVal->uid, pVal->ts); - } else if (pVal->type == TMQ_OFFSET__SNAPSHOT_META) { - snprintf(buf, maxLen, "offset(ss meta) uid:%" PRId64 ", ts:%" PRId64, pVal->uid, pVal->ts); } else { ASSERT(0); } return 0; } -#endif bool tOffsetEqual(const STqOffsetVal *pLeft, const STqOffsetVal *pRight) { if (pLeft->type == pRight->type) { @@ -5618,9 +5723,7 @@ bool tOffsetEqual(const STqOffsetVal *pLeft, const STqOffsetVal *pRight) { } else if (pLeft->type == TMQ_OFFSET__SNAPSHOT_DATA) { return pLeft->uid == pRight->uid && pLeft->ts == pRight->ts; } else if (pLeft->type == TMQ_OFFSET__SNAPSHOT_META) { - ASSERT(0); - // TODO - return pLeft->uid == pRight->uid && pLeft->ts == pRight->ts; + return pLeft->uid == pRight->uid; } else { ASSERT(0); /*ASSERT(pLeft->type == TMQ_OFFSET__RESET_NONE || pLeft->type == TMQ_OFFSET__RESET_EARLIEAST ||*/ @@ -5643,7 +5746,7 @@ int32_t tDecodeSTqOffset(SDecoder *pDecoder, STqOffset *pOffset) { return 0; } -int32_t tEncodeSCheckAlterInfo(SEncoder *pEncoder, const SCheckAlterInfo *pInfo) { +int32_t tEncodeSTqCheckInfo(SEncoder *pEncoder, const STqCheckInfo *pInfo) { if (tEncodeCStr(pEncoder, pInfo->topic) < 0) return -1; if (tEncodeI64(pEncoder, pInfo->ntbUid) < 0) return -1; int32_t sz = taosArrayGetSize(pInfo->colIdList); @@ -5655,7 +5758,7 @@ int32_t tEncodeSCheckAlterInfo(SEncoder *pEncoder, const SCheckAlterInfo *pInfo) return pEncoder->pos; } -int32_t tDecodeSCheckAlterInfo(SDecoder *pDecoder, SCheckAlterInfo *pInfo) { +int32_t tDecodeSTqCheckInfo(SDecoder *pDecoder, STqCheckInfo *pInfo) { if (tDecodeCStrTo(pDecoder, pInfo->topic) < 0) return -1; if (tDecodeI64(pDecoder, &pInfo->ntbUid) < 0) return -1; int32_t sz; @@ -5705,6 +5808,21 @@ int32_t tDecodeDeleteRes(SDecoder *pCoder, SDeleteRes *pRes) { if (tDecodeCStrTo(pCoder, pRes->tsColName) < 0) return -1; return 0; } + +int32_t tEncodeSMqMetaRsp(SEncoder *pEncoder, const SMqMetaRsp *pRsp) { + if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1; + if (tEncodeI16(pEncoder, pRsp->resMsgType)) return -1; + if (tEncodeBinary(pEncoder, pRsp->metaRsp, pRsp->metaRspLen)) return -1; + return 0; +} + +int32_t tDecodeSMqMetaRsp(SDecoder *pDecoder, SMqMetaRsp *pRsp) { + if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1; + if (tDecodeI16(pDecoder, &pRsp->resMsgType) < 0) return -1; + if (tDecodeBinaryAlloc(pDecoder, &pRsp->metaRsp, (uint64_t *)&pRsp->metaRspLen) < 0) return -1; + return 0; +} + int32_t tEncodeSMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { if (tEncodeSTqOffsetVal(pEncoder, &pRsp->reqOffset) < 0) return -1; if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1; @@ -5771,6 +5889,99 @@ int32_t tDecodeSMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) { return 0; } +void tDeleteSMqDataRsp(SMqDataRsp *pRsp) { + taosArrayDestroy(pRsp->blockDataLen); + taosArrayDestroyP(pRsp->blockData, (FDelete)taosMemoryFree); + taosArrayDestroyP(pRsp->blockSchema, (FDelete)tDeleteSSchemaWrapper); + taosArrayDestroyP(pRsp->blockTbName, (FDelete)taosMemoryFree); +} + +int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const STaosxRsp *pRsp) { + if (tEncodeSTqOffsetVal(pEncoder, &pRsp->reqOffset) < 0) return -1; + if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1; + if (tEncodeI32(pEncoder, pRsp->blockNum) < 0) return -1; + if (pRsp->blockNum != 0) { + if (tEncodeI8(pEncoder, pRsp->withTbName) < 0) return -1; + if (tEncodeI8(pEncoder, pRsp->withSchema) < 0) return -1; + + for (int32_t i = 0; i < pRsp->blockNum; i++) { + int32_t bLen = *(int32_t *)taosArrayGet(pRsp->blockDataLen, i); + void *data = taosArrayGetP(pRsp->blockData, i); + if (tEncodeBinary(pEncoder, (const uint8_t *)data, bLen) < 0) return -1; + if (pRsp->withSchema) { + SSchemaWrapper *pSW = (SSchemaWrapper *)taosArrayGetP(pRsp->blockSchema, i); + if (tEncodeSSchemaWrapper(pEncoder, pSW) < 0) return -1; + } + if (pRsp->withTbName) { + char *tbName = (char *)taosArrayGetP(pRsp->blockTbName, i); + if (tEncodeCStr(pEncoder, tbName) < 0) return -1; + } + } + } + if (tEncodeI32(pEncoder, pRsp->createTableNum) < 0) return -1; + if (pRsp->createTableNum) { + for (int32_t i = 0; i < pRsp->createTableNum; i++) { + void *createTableReq = taosArrayGetP(pRsp->createTableReq, i); + int32_t createTableLen = *(int32_t *)taosArrayGet(pRsp->createTableLen, i); + if (tEncodeBinary(pEncoder, createTableReq, createTableLen) < 0) return -1; + } + } + return 0; +} + +int32_t tDecodeSTaosxRsp(SDecoder *pDecoder, STaosxRsp *pRsp) { + if (tDecodeSTqOffsetVal(pDecoder, &pRsp->reqOffset) < 0) return -1; + if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1; + if (tDecodeI32(pDecoder, &pRsp->blockNum) < 0) return -1; + if (pRsp->blockNum != 0) { + pRsp->blockData = taosArrayInit(pRsp->blockNum, sizeof(void *)); + pRsp->blockDataLen = taosArrayInit(pRsp->blockNum, sizeof(int32_t)); + if (tDecodeI8(pDecoder, &pRsp->withTbName) < 0) return -1; + if (tDecodeI8(pDecoder, &pRsp->withSchema) < 0) return -1; + if (pRsp->withTbName) { + pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void *)); + } + if (pRsp->withSchema) { + pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void *)); + } + + for (int32_t i = 0; i < pRsp->blockNum; i++) { + void *data; + uint64_t bLen; + if (tDecodeBinaryAlloc(pDecoder, &data, &bLen) < 0) return -1; + taosArrayPush(pRsp->blockData, &data); + int32_t len = bLen; + taosArrayPush(pRsp->blockDataLen, &len); + + if (pRsp->withSchema) { + SSchemaWrapper *pSW = (SSchemaWrapper *)taosMemoryCalloc(1, sizeof(SSchemaWrapper)); + if (pSW == NULL) return -1; + if (tDecodeSSchemaWrapper(pDecoder, pSW) < 0) return -1; + taosArrayPush(pRsp->blockSchema, &pSW); + } + + if (pRsp->withTbName) { + char *tbName; + if (tDecodeCStrAlloc(pDecoder, &tbName) < 0) return -1; + taosArrayPush(pRsp->blockTbName, &tbName); + } + } + } + if (tDecodeI32(pDecoder, &pRsp->createTableNum) < 0) return -1; + if (pRsp->createTableNum) { + pRsp->createTableLen = taosArrayInit(pRsp->createTableNum, sizeof(int32_t)); + pRsp->createTableReq = taosArrayInit(pRsp->createTableNum, sizeof(void *)); + for (int32_t i = 0; i < pRsp->createTableNum; i++) { + void *pCreate = NULL; + uint64_t len; + if (tDecodeBinaryAlloc(pDecoder, &pCreate, &len) < 0) return -1; + int32_t l = (int32_t)len; + taosArrayPush(pRsp->createTableLen, &l); + taosArrayPush(pRsp->createTableReq, &pCreate); + } + } + return 0; +} int32_t tEncodeSSingleDeleteReq(SEncoder *pEncoder, const SSingleDeleteReq *pReq) { if (tEncodeI64(pEncoder, pReq->uid) < 0) return -1; if (tEncodeI64(pEncoder, pReq->ts) < 0) return -1; diff --git a/source/common/src/ttypes.c b/source/common/src/ttypes.c index 156b66ae865e8a1f4b1f7562961d9f45afb688c6..fee89e2f37e3afeec0959b1d78b9a73745573edc 100644 --- a/source/common/src/ttypes.c +++ b/source/common/src/ttypes.c @@ -392,10 +392,10 @@ tDataTypeDescriptor tDataTypes[TSDB_DATA_TYPE_MAX] = { getStatics_i64}, {TSDB_DATA_TYPE_FLOAT, 5, FLOAT_BYTES, "FLOAT", 0, 0, tsCompressFloat, tsDecompressFloat, getStatics_f}, {TSDB_DATA_TYPE_DOUBLE, 6, DOUBLE_BYTES, "DOUBLE", 0, 0, tsCompressDouble, tsDecompressDouble, getStatics_d}, - {TSDB_DATA_TYPE_VARCHAR, 6, 0, "VARCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_bin}, + {TSDB_DATA_TYPE_VARCHAR, 6, 1, "VARCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_bin}, {TSDB_DATA_TYPE_TIMESTAMP, 9, LONG_BYTES, "TIMESTAMP", INT64_MIN, INT64_MAX, tsCompressTimestamp, tsDecompressTimestamp, getStatics_i64}, - {TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_nchr}, + {TSDB_DATA_TYPE_NCHAR, 5, 1, "NCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_nchr}, {TSDB_DATA_TYPE_UTINYINT, 16, CHAR_BYTES, "TINYINT UNSIGNED", 0, UINT8_MAX, tsCompressTinyint, tsDecompressTinyint, getStatics_u8}, {TSDB_DATA_TYPE_USMALLINT, 17, SHORT_BYTES, "SMALLINT UNSIGNED", 0, UINT16_MAX, tsCompressSmallint, diff --git a/source/common/src/tvariant.c b/source/common/src/tvariant.c index 0810be149716e58fdac74b67db6946fde7db62e9..a01c393441c0a4b6945226ba2c77ffe1a23ced57 100644 --- a/source/common/src/tvariant.c +++ b/source/common/src/tvariant.c @@ -155,8 +155,8 @@ void taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uin void taosVariantDestroy(SVariant *pVar) { if (pVar == NULL) return; - if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR - || pVar->nType == TSDB_DATA_TYPE_JSON) { + if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR || + pVar->nType == TSDB_DATA_TYPE_JSON) { taosMemoryFreeClear(pVar->pz); pVar->nLen = 0; } @@ -185,8 +185,8 @@ void taosVariantAssign(SVariant *pDst, const SVariant *pSrc) { if (pSrc == NULL || pDst == NULL) return; pDst->nType = pSrc->nType; - if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR - || pSrc->nType == TSDB_DATA_TYPE_JSON) { + if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR || + pSrc->nType == TSDB_DATA_TYPE_JSON) { int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE; char *p = taosMemoryRealloc(pDst->pz, len); assert(p); diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c index 647af20fcf24e47b27b02d69595f8d1555a4cc19..ec761e6441ce651db5d6f3b034e4f11b1686fcdc 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c @@ -225,7 +225,8 @@ SArray *mmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_SMA_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_CHANGE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_DELETE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_CHECK_ALTER_INFO_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_ADD_CHECK_INFO_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_DELETE_CHECK_INFO_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_SCH_DROP_TASK, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DEPLOY_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DROP_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 7c6807ab87220b8cbaecddfd9b0278c0b13aa0fe..e610b41a04dc7792638a251fa379bcacb37e0050 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -338,6 +338,7 @@ SArray *vmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_SCH_MERGE_QUERY, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_SCH_QUERY_CONTINUE, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH_RSMA, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_EXEC_RSMA, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_SCH_FETCH, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_SCH_MERGE_FETCH, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_TABLE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; @@ -361,7 +362,8 @@ SArray *vmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_CHANGE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_DELETE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_COMMIT_OFFSET, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_CHECK_ALTER_INFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_ADD_CHECK_INFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_DELETE_CHECK_INFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_CONSUME, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_DELETE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_BATCH_DEL, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/mnode/impl/inc/mndCluster.h b/source/dnode/mnode/impl/inc/mndCluster.h index 0de253fb6adebf053eb1eb6afc68693f1fbc6747..2cb41edd7c1d37c8dab6f0e276259e9cc530fea8 100644 --- a/source/dnode/mnode/impl/inc/mndCluster.h +++ b/source/dnode/mnode/impl/inc/mndCluster.h @@ -27,6 +27,7 @@ void mndCleanupCluster(SMnode *pMnode); int32_t mndGetClusterName(SMnode *pMnode, char *clusterName, int32_t len); int64_t mndGetClusterId(SMnode *pMnode); int64_t mndGetClusterCreateTime(SMnode *pMnode); +float mndGetClusterUpTime(SMnode *pMnode); #ifdef __cplusplus } diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 455da6a40e9620d29bae8079fa98e8a029903eea..ea05215fe90d30708013fe4b1c8fc08d2be8d3d6 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -179,6 +179,7 @@ typedef struct { char name[TSDB_CLUSTER_ID_LEN]; int64_t createdTime; int64_t updateTime; + int32_t upTime; } SClusterObj; typedef struct { @@ -636,6 +637,7 @@ typedef struct { int32_t tEncodeSStreamObj(SEncoder* pEncoder, const SStreamObj* pObj); int32_t tDecodeSStreamObj(SDecoder* pDecoder, SStreamObj* pObj); +void tFreeStreamObj(SStreamObj* pObj); typedef struct { char streamName[TSDB_STREAM_FNAME_LEN]; diff --git a/source/dnode/mnode/impl/inc/mndInfoSchema.h b/source/dnode/mnode/impl/inc/mndInfoSchema.h index b10d92ee3de1a0e06d801c9a8840751a9f52f37c..4f98465cd170280d8c9f5e9356c37cebf26f9bd0 100644 --- a/source/dnode/mnode/impl/inc/mndInfoSchema.h +++ b/source/dnode/mnode/impl/inc/mndInfoSchema.h @@ -24,7 +24,8 @@ extern "C" { int32_t mndInitInfos(SMnode *pMnode); void mndCleanupInfos(SMnode *pMnode); -int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, STableMetaRsp *pRsp); +int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, bool sysinfo, + STableMetaRsp *pRsp); int32_t mndBuildInsTableCfg(SMnode *pMnode, const char *dbFName, const char *tbName, STableCfgRsp *pRsp); #ifdef __cplusplus diff --git a/source/dnode/mnode/impl/inc/mndStb.h b/source/dnode/mnode/impl/inc/mndStb.h index 44a7fdadde7227fd75303946a950152834271f0b..8f0d55e10061ce4517c4305ae7450a7439b91cfd 100644 --- a/source/dnode/mnode/impl/inc/mndStb.h +++ b/source/dnode/mnode/impl/inc/mndStb.h @@ -34,6 +34,8 @@ int32_t mndCheckCreateStbReq(SMCreateStbReq *pCreate); SDbObj *mndAcquireDbByStb(SMnode *pMnode, const char *stbName); int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreate, SDbObj *pDb); int32_t mndAddStbToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb); +void mndFreeStb(SStbObj *pStb); +int32_t mndBuildSMCreateStbRsp(SMnode *pMnode, char* dbFName, char* stbFName, void **pCont, int32_t *pLen); void mndExtractDbNameFromStbFullName(const char *stbFullName, char *dst); void mndExtractTbNameFromStbFullName(const char *stbFullName, char *dst, int32_t dstSize); diff --git a/source/dnode/mnode/impl/src/mndCluster.c b/source/dnode/mnode/impl/src/mndCluster.c index a82bf739f5feae48e444a7100f45aef19f7c6803..7d633f90bd937c24b82094bdc0fa6d30c30bc250 100644 --- a/source/dnode/mnode/impl/src/mndCluster.c +++ b/source/dnode/mnode/impl/src/mndCluster.c @@ -19,7 +19,7 @@ #include "mndTrans.h" #define CLUSTER_VER_NUMBE 1 -#define CLUSTER_RESERVE_SIZE 64 +#define CLUSTER_RESERVE_SIZE 60 static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster); static SSdbRow *mndClusterActionDecode(SSdbRaw *pRaw); @@ -29,6 +29,7 @@ static int32_t mndClusterActionUpdate(SSdb *pSdb, SClusterObj *pOldCluster, SCl static int32_t mndCreateDefaultCluster(SMnode *pMnode); static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); static void mndCancelGetNextCluster(SMnode *pMnode, void *pIter); +static int32_t mndProcessUptimeTimer(SRpcMsg *pReq); int32_t mndInitCluster(SMnode *pMnode) { SSdbTable table = { @@ -42,8 +43,10 @@ int32_t mndInitCluster(SMnode *pMnode) { .deleteFp = (SdbDeleteFp)mndClusterActionDelete, }; + mndSetMsgHandle(pMnode, TDMT_MND_UPTIME_TIMER, mndProcessUptimeTimer); mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_CLUSTER, mndRetrieveClusters); mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_CLUSTER, mndCancelGetNextCluster); + return sdbSetTable(pMnode->pSdb, table); } @@ -62,40 +65,69 @@ int32_t mndGetClusterName(SMnode *pMnode, char *clusterName, int32_t len) { return 0; } -int64_t mndGetClusterId(SMnode *pMnode) { - SSdb *pSdb = pMnode->pSdb; - void *pIter = NULL; - int64_t clusterId = -1; +static SClusterObj *mndAcquireCluster(SMnode *pMnode) { + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; while (1) { SClusterObj *pCluster = NULL; pIter = sdbFetch(pSdb, SDB_CLUSTER, pIter, (void **)&pCluster); if (pIter == NULL) break; + return pCluster; + } + + return NULL; +} + +static void mndReleaseCluster(SMnode *pMnode, SClusterObj *pCluster) { + SSdb *pSdb = pMnode->pSdb; + sdbRelease(pSdb, pCluster); +} + +int64_t mndGetClusterId(SMnode *pMnode) { + int64_t clusterId = 0; + SClusterObj *pCluster = mndAcquireCluster(pMnode); + if (pCluster != NULL) { clusterId = pCluster->id; - sdbRelease(pSdb, pCluster); + mndReleaseCluster(pMnode, pCluster); } return clusterId; } int64_t mndGetClusterCreateTime(SMnode *pMnode) { - SSdb *pSdb = pMnode->pSdb; - void *pIter = NULL; - int64_t createTime = INT64_MAX; - - while (1) { - SClusterObj *pCluster = NULL; - pIter = sdbFetch(pSdb, SDB_CLUSTER, pIter, (void **)&pCluster); - if (pIter == NULL) break; - + int64_t createTime = 0; + SClusterObj *pCluster = mndAcquireCluster(pMnode); + if (pCluster != NULL) { createTime = pCluster->createdTime; - sdbRelease(pSdb, pCluster); + mndReleaseCluster(pMnode, pCluster); } return createTime; } +static int32_t mndGetClusterUpTimeImp(SClusterObj *pCluster) { +#if 0 + int32_t upTime = taosGetTimestampSec() - pCluster->updateTime / 1000; + upTime = upTime + pCluster->upTime; + return upTime; +#else + return pCluster->upTime; +#endif +} + +float mndGetClusterUpTime(SMnode *pMnode) { + int64_t upTime = 0; + SClusterObj *pCluster = mndAcquireCluster(pMnode); + if (pCluster != NULL) { + upTime = mndGetClusterUpTimeImp(pCluster); + mndReleaseCluster(pMnode, pCluster); + } + + return upTime / 86400.0f; +} + static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -107,6 +139,7 @@ static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster) { SDB_SET_INT64(pRaw, dataPos, pCluster->createdTime, _OVER) SDB_SET_INT64(pRaw, dataPos, pCluster->updateTime, _OVER) SDB_SET_BINARY(pRaw, dataPos, pCluster->name, TSDB_CLUSTER_ID_LEN, _OVER) + SDB_SET_INT32(pRaw, dataPos, pCluster->upTime, _OVER) SDB_SET_RESERVE(pRaw, dataPos, CLUSTER_RESERVE_SIZE, _OVER) terrno = 0; @@ -144,6 +177,7 @@ static SSdbRow *mndClusterActionDecode(SSdbRaw *pRaw) { SDB_GET_INT64(pRaw, dataPos, &pCluster->createdTime, _OVER) SDB_GET_INT64(pRaw, dataPos, &pCluster->updateTime, _OVER) SDB_GET_BINARY(pRaw, dataPos, pCluster->name, TSDB_CLUSTER_ID_LEN, _OVER) + SDB_GET_INT32(pRaw, dataPos, &pCluster->upTime, _OVER) SDB_GET_RESERVE(pRaw, dataPos, CLUSTER_RESERVE_SIZE, _OVER) terrno = 0; @@ -162,6 +196,7 @@ _OVER: static int32_t mndClusterActionInsert(SSdb *pSdb, SClusterObj *pCluster) { mTrace("cluster:%" PRId64 ", perform insert action, row:%p", pCluster->id, pCluster); pSdb->pMnode->clusterId = pCluster->id; + pCluster->updateTime = taosGetTimestampMs(); return 0; } @@ -171,7 +206,10 @@ static int32_t mndClusterActionDelete(SSdb *pSdb, SClusterObj *pCluster) { } static int32_t mndClusterActionUpdate(SSdb *pSdb, SClusterObj *pOld, SClusterObj *pNew) { - mTrace("cluster:%" PRId64 ", perform update action, old row:%p new row:%p", pOld->id, pOld, pNew); + mTrace("cluster:%" PRId64 ", perform update action, old row:%p new row:%p, uptime from %d to %d", pOld->id, pOld, + pNew, pOld->upTime, pNew->upTime); + pOld->upTime = pNew->upTime; + pOld->updateTime = taosGetTimestampMs(); return 0; } @@ -242,6 +280,10 @@ static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock * pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, buf, false); + int32_t upTime = mndGetClusterUpTimeImp(pCluster); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char *)&upTime, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)&pCluster->createdTime, false); @@ -257,3 +299,40 @@ static void mndCancelGetNextCluster(SMnode *pMnode, void *pIter) { SSdb *pSdb = pMnode->pSdb; sdbCancelFetch(pSdb, pIter); } + +static int32_t mndProcessUptimeTimer(SRpcMsg *pReq) { + SMnode *pMnode = pReq->info.node; + SClusterObj clusterObj = {0}; + SClusterObj *pCluster = mndAcquireCluster(pMnode); + if (pCluster != NULL) { + memcpy(&clusterObj, pCluster, sizeof(SClusterObj)); + clusterObj.upTime += tsUptimeInterval; + mndReleaseCluster(pMnode, pCluster); + } + + if (clusterObj.id <= 0) { + mError("can't get cluster info while update uptime"); + return 0; + } + + mTrace("update cluster uptime to %" PRId64, clusterObj.upTime); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq); + if (pTrans == NULL) return -1; + + SSdbRaw *pCommitRaw = mndClusterActionEncode(&clusterObj); + if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { + mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); + mndTransDrop(pTrans); + return -1; + } + sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + + if (mndTransPrepare(pMnode, pTrans) != 0) { + mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); + mndTransDrop(pTrans); + return -1; + } + + mndTransDrop(pTrans); + return 0; +} diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 853ace79fd79bd2c30684446d0c12f5640eb881c..8c1c3ba8735f21684b5f9577b9fad20beec110a7 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -1731,7 +1731,7 @@ static int32_t mndRetrieveDbs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc SDbObj infoschemaDb = {0}; setInformationSchemaDbCfg(&infoschemaDb); size_t numOfTables = 0; - getInfosDbMeta(NULL, &numOfTables); + getVisibleInfosTablesNum(sysinfo, &numOfTables); mndDumpDbInfoData(pMnode, pBlock, &infoschemaDb, pShow, numOfRows, numOfTables, true, 0, 1); numOfRows += 1; diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index 08ce161409037316478b083750187fd10a7f8b9e..e6f1a40993fcb7adf2fec121b5e99374c48aae8f 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ b/source/dnode/mnode/impl/src/mndDef.c @@ -116,6 +116,25 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj) { return 0; } +void tFreeStreamObj(SStreamObj *pStream) { + taosMemoryFree(pStream->sql); + taosMemoryFree(pStream->ast); + taosMemoryFree(pStream->physicalPlan); + if (pStream->outputSchema.nCols) taosMemoryFree(pStream->outputSchema.pSchema); + + int32_t sz = taosArrayGetSize(pStream->tasks); + for (int32_t i = 0; i < sz; i++) { + SArray *pLevel = taosArrayGetP(pStream->tasks, i); + int32_t taskSz = taosArrayGetSize(pLevel); + for (int32_t j = 0; j < taskSz; j++) { + SStreamTask *pTask = taosArrayGetP(pLevel, j); + tFreeSStreamTask(pTask); + } + taosArrayDestroy(pLevel); + } + taosArrayDestroy(pStream->tasks); +} + SMqVgEp *tCloneSMqVgEp(const SMqVgEp *pVgEp) { SMqVgEp *pVgEpNew = taosMemoryMalloc(sizeof(SMqVgEp)); if (pVgEpNew == NULL) return NULL; diff --git a/source/dnode/mnode/impl/src/mndInfoSchema.c b/source/dnode/mnode/impl/src/mndInfoSchema.c index bf33cf603f68ccedfdf69c972441021bdbcb0a53..09172115f8502e392c1d37ae1d256761afb02126 100644 --- a/source/dnode/mnode/impl/src/mndInfoSchema.c +++ b/source/dnode/mnode/impl/src/mndInfoSchema.c @@ -14,8 +14,8 @@ */ #define _DEFAULT_SOURCE -#include "systable.h" #include "mndInt.h" +#include "systable.h" static int32_t mndInitInfosTableSchema(const SSysDbTableSchema *pSrc, int32_t colNum, SSchema **pDst) { SSchema *schema = taosMemoryCalloc(colNum, sizeof(SSchema)); @@ -29,6 +29,9 @@ static int32_t mndInitInfosTableSchema(const SSysDbTableSchema *pSrc, int32_t co schema[i].type = pSrc[i].type; schema[i].colId = i + 1; schema[i].bytes = pSrc[i].bytes; + if (pSrc[i].sysInfo) { + schema[i].flags |= COL_IS_SYSINFO; + } } *pDst = schema; @@ -43,13 +46,14 @@ static int32_t mndInsInitMeta(SHashObj *hash) { meta.sversion = 1; meta.tversion = 1; - size_t size = 0; - const SSysTableMeta* pInfosTableMeta = NULL; + size_t size = 0; + const SSysTableMeta *pInfosTableMeta = NULL; getInfosDbMeta(&pInfosTableMeta, &size); for (int32_t i = 0; i < size; ++i) { tstrncpy(meta.tbName, pInfosTableMeta[i].name, sizeof(meta.tbName)); meta.numOfColumns = pInfosTableMeta[i].colNum; + meta.sysInfo = pInfosTableMeta[i].sysInfo; if (mndInitInfosTableSchema(pInfosTableMeta[i].schema, pInfosTableMeta[i].colNum, &meta.pSchemas)) { return -1; @@ -64,14 +68,15 @@ static int32_t mndInsInitMeta(SHashObj *hash) { return 0; } -int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, STableMetaRsp *pRsp) { +int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, bool sysinfo, + STableMetaRsp *pRsp) { if (NULL == pMnode->infosMeta) { terrno = TSDB_CODE_APP_NOT_READY; return -1; } STableMetaRsp *pMeta = taosHashGet(pMnode->infosMeta, tbName, strlen(tbName)); - if (NULL == pMeta) { + if (NULL == pMeta || (!sysinfo && pMeta->sysInfo)) { mError("invalid information schema table name:%s", tbName); terrno = TSDB_CODE_MND_INVALID_SYS_TABLENAME; return -1; @@ -121,7 +126,6 @@ int32_t mndBuildInsTableCfg(SMnode *pMnode, const char *dbFName, const char *tbN return 0; } - int32_t mndInitInfos(SMnode *pMnode) { pMnode->infosMeta = taosHashInit(20, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), false, HASH_NO_LOCK); if (pMnode->infosMeta == NULL) { diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index c3296ac5c10910136a4bb279d8188336b03ce188..2221718023c8d080059736fd811c946618fd948d 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -100,6 +100,16 @@ static void mndGrantHeartBeat(SMnode *pMnode) { } } +static void mndIncreaseUpTime(SMnode *pMnode) { + int32_t contLen = 0; + void *pReq = mndBuildTimerMsg(&contLen); + if (pReq != NULL) { + SRpcMsg rpcMsg = { + .msgType = TDMT_MND_UPTIME_TIMER, .pCont = pReq, .contLen = contLen, .info.ahandle = (void *)0x9528}; + tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + } +} + static void *mndThreadFp(void *param) { SMnode *pMnode = param; int64_t lastTime = 0; @@ -122,13 +132,17 @@ static void *mndThreadFp(void *param) { mndCalMqRebalance(pMnode); } - if (lastTime % (tsTelemInterval * 10) == 0) { + if (lastTime % (tsTelemInterval * 10) == ((tsTelemInterval - 1) * 10)) { mndPullupTelem(pMnode); } if (lastTime % (tsGrantHBInterval * 10) == 0) { mndGrantHeartBeat(pMnode); } + + if ((lastTime % (tsUptimeInterval * 10)) == ((tsUptimeInterval - 1) * 10)) { + mndIncreaseUpTime(pMnode); + } } return NULL; @@ -556,7 +570,8 @@ static int32_t mndCheckMnodeState(SRpcMsg *pMsg) { } if (mndAcquireRpcRef(pMsg->info.node) == 0) return 0; if (pMsg->msgType == TDMT_MND_MQ_TIMER || pMsg->msgType == TDMT_MND_TELEM_TIMER || - pMsg->msgType == TDMT_MND_TRANS_TIMER || pMsg->msgType == TDMT_MND_TTL_TIMER) { + pMsg->msgType == TDMT_MND_TRANS_TIMER || pMsg->msgType == TDMT_MND_TTL_TIMER || + pMsg->msgType == TDMT_MND_UPTIME_TIMER) { return -1; } @@ -705,7 +720,8 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr if (pObj->id == pMnode->selfDnodeId) { pClusterInfo->first_ep_dnode_id = pObj->id; tstrncpy(pClusterInfo->first_ep, pObj->pDnode->ep, sizeof(pClusterInfo->first_ep)); - pClusterInfo->master_uptime = (ms - pObj->stateStartTime) / (86400000.0f); + pClusterInfo->master_uptime = mndGetClusterUpTime(pMnode); + // pClusterInfo->master_uptime = (ms - pObj->stateStartTime) / (86400000.0f); tstrncpy(desc.role, syncStr(TAOS_SYNC_STATE_LEADER), sizeof(desc.role)); } else { tstrncpy(desc.role, syncStr(pObj->state), sizeof(desc.role)); diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c index 4f07d9e0143f52da057c40d2e655044da01a6b72..71bda4d4f34213a7b3240f6634b26579fb66c1ee 100644 --- a/source/dnode/mnode/impl/src/mndMnode.c +++ b/source/dnode/mnode/impl/src/mndMnode.c @@ -89,14 +89,14 @@ static int32_t mndCreateDefaultMnode(SMnode *pMnode) { if (pRaw == NULL) return -1; sdbSetRawStatus(pRaw, SDB_STATUS_READY); - mDebug("mnode:%d, will be created when deploying, raw:%p", mnodeObj.id, pRaw); + mInfo("mnode:%d, will be created when deploying, raw:%p", mnodeObj.id, pRaw); STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, NULL); if (pTrans == NULL) { mError("mnode:%d, failed to create since %s", mnodeObj.id, terrstr()); return -1; } - mDebug("trans:%d, used to create mnode:%d", pTrans->id, mnodeObj.id); + mInfo("trans:%d, used to create mnode:%d", pTrans->id, mnodeObj.id); if (mndTransAppendCommitlog(pTrans, pRaw) != 0) { mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); @@ -365,7 +365,7 @@ static int32_t mndCreateMnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq); if (pTrans == NULL) goto _OVER; mndTransSetSerial(pTrans); - mDebug("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId); + mInfo("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId); if (mndSetCreateMnodeRedoLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER; if (mndSetCreateMnodeCommitLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER; @@ -392,7 +392,7 @@ static int32_t mndProcessCreateMnodeReq(SRpcMsg *pReq) { goto _OVER; } - mDebug("mnode:%d, start to create", createReq.dnodeId); + mInfo("mnode:%d, start to create", createReq.dnodeId); if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_MNODE) != 0) { goto _OVER; } @@ -574,7 +574,7 @@ static int32_t mndDropMnode(SMnode *pMnode, SRpcMsg *pReq, SMnodeObj *pObj) { pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq); if (pTrans == NULL) goto _OVER; mndTransSetSerial(pTrans); - mDebug("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id); + mInfo("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id); if (mndSetDropMnodeInfoToTrans(pMnode, pTrans, pObj) != 0) goto _OVER; if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; @@ -597,7 +597,7 @@ static int32_t mndProcessDropMnodeReq(SRpcMsg *pReq) { goto _OVER; } - mDebug("mnode:%d, start to drop", dropReq.dnodeId); + mInfo("mnode:%d, start to drop", dropReq.dnodeId); if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_MNODE) != 0) { goto _OVER; } @@ -732,7 +732,7 @@ static int32_t mndProcessAlterMnodeReq(SRpcMsg *pReq) { } } - mTrace("trans:-1, sync reconfig will be proposed"); + mInfo("trans:-1, sync reconfig will be proposed"); SSyncMgmt *pMgmt = &pMnode->syncMgmt; pMgmt->standby = 0; diff --git a/source/dnode/mnode/impl/src/mndOffset.c b/source/dnode/mnode/impl/src/mndOffset.c index 9f6108004db0809b70907e13ca32988e6d278a48..037a46345ffed6b1205292e513df1c2db9528b3b 100644 --- a/source/dnode/mnode/impl/src/mndOffset.c +++ b/source/dnode/mnode/impl/src/mndOffset.c @@ -15,10 +15,10 @@ #define _DEFAULT_SOURCE #include "mndOffset.h" -#include "mndPrivilege.h" #include "mndDb.h" #include "mndDnode.h" #include "mndMnode.h" +#include "mndPrivilege.h" #include "mndShow.h" #include "mndStb.h" #include "mndTopic.h" @@ -305,7 +305,7 @@ int32_t mndDropOffsetByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) { sdbRelease(pSdb, pOffset); } - return code; + return code; } int32_t mndDropOffsetByTopic(SMnode *pMnode, STrans *pTrans, const char *topic) { diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index e55c562e38c207600956cd1eafbb88d744750f7d..e8737e30c9817bd71d1b3a47f245ef0004603dc3 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -270,6 +270,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { SConnectRsp connectRsp = {0}; connectRsp.acctId = pUser->acctId; connectRsp.superUser = pUser->superUser; + connectRsp.sysInfo = pUser->sysInfo; connectRsp.clusterId = pMnode->clusterId; connectRsp.connId = pConn->id; connectRsp.connType = connReq.connType; diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c index a24b7ef4597ceb1d5aba35efe907e9a7e12cb0a8..3bfd7eb5964a446698556551bbe572f2dc568110 100644 --- a/source/dnode/mnode/impl/src/mndScheduler.c +++ b/source/dnode/mnode/impl/src/mndScheduler.c @@ -424,6 +424,8 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) { } mndAddTaskToTaskSet(taskSourceLevel, pTask); + pTask->triggerParam = 0; + // source pTask->taskLevel = TASK_LEVEL__SOURCE; diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c index 9499c90c57c59e3600c701668dd17671f641d919..5a998dfe986d9f012e066f45810604b7ca9d728f 100644 --- a/source/dnode/mnode/impl/src/mndShow.c +++ b/source/dnode/mnode/impl/src/mndShow.c @@ -88,7 +88,7 @@ static int32_t convertToRetrieveType(char *name, int32_t len) { type = TSDB_MGMT_TABLE_VGROUP; } else if (strncasecmp(name, TSDB_PERFS_TABLE_CONSUMERS, len) == 0) { type = TSDB_MGMT_TABLE_CONSUMERS; - } else if (strncasecmp(name, TSDB_PERFS_TABLE_SUBSCRIPTIONS, len) == 0) { + } else if (strncasecmp(name, TSDB_INS_TABLE_SUBSCRIPTIONS, len) == 0) { type = TSDB_MGMT_TABLE_SUBSCRIPTIONS; } else if (strncasecmp(name, TSDB_PERFS_TABLE_TRANS, len) == 0) { type = TSDB_MGMT_TABLE_TRANS; @@ -102,9 +102,9 @@ static int32_t convertToRetrieveType(char *name, int32_t len) { type = TSDB_MGMT_TABLE_QUERIES; } else if (strncasecmp(name, TSDB_INS_TABLE_VNODES, len) == 0) { type = TSDB_MGMT_TABLE_VNODES; - } else if (strncasecmp(name, TSDB_PERFS_TABLE_TOPICS, len) == 0) { + } else if (strncasecmp(name, TSDB_INS_TABLE_TOPICS, len) == 0) { type = TSDB_MGMT_TABLE_TOPICS; - } else if (strncasecmp(name, TSDB_PERFS_TABLE_STREAMS, len) == 0) { + } else if (strncasecmp(name, TSDB_INS_TABLE_STREAMS, len) == 0) { type = TSDB_MGMT_TABLE_STREAMS; } else if (strncasecmp(name, TSDB_PERFS_TABLE_APPS, len) == 0) { type = TSDB_MGMT_TABLE_APPS; diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index 006d9e749cfd273f4f112e84e435d495f29125b1..8638cc511890066f45367253313aec8f626ceb8e 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -38,7 +38,6 @@ static SSdbRow *mndSmaActionDecode(SSdbRaw *pRaw); static int32_t mndSmaActionInsert(SSdb *pSdb, SSmaObj *pSma); static int32_t mndSmaActionDelete(SSdb *pSdb, SSmaObj *pSpSmatb); static int32_t mndSmaActionUpdate(SSdb *pSdb, SSmaObj *pOld, SSmaObj *pNew); -static int32_t mndSmaGetVgEpSet(SMnode *pMnode, SDbObj *pDb, SVgEpSet **ppVgEpSet, int32_t *numOfVgroups); static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq); static int32_t mndProcessDropSmaReq(SRpcMsg *pReq); static int32_t mndProcessGetSmaReq(SRpcMsg *pReq); @@ -489,7 +488,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea smaObj.uid = mndGenerateUid(pCreate->name, TSDB_TABLE_FNAME_LEN); ASSERT(smaObj.uid != 0); char resultTbName[TSDB_TABLE_FNAME_LEN + 16] = {0}; - snprintf(resultTbName, TSDB_TABLE_FNAME_LEN + 16, "%s_td_tsma_rst_tb",pCreate->name); + snprintf(resultTbName, TSDB_TABLE_FNAME_LEN + 16, "%s_td_tsma_rst_tb", pCreate->name); memcpy(smaObj.dstTbName, resultTbName, TSDB_TABLE_FNAME_LEN); smaObj.dstTbUid = mndGenerateUid(smaObj.dstTbName, TSDB_TABLE_FNAME_LEN); smaObj.stbUid = pStb->uid; @@ -530,7 +529,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea streamObj.sourceDbUid = pDb->uid; streamObj.targetDbUid = pDb->uid; streamObj.version = 1; - streamObj.sql = pCreate->sql; + streamObj.sql = strdup(pCreate->sql); streamObj.smaId = smaObj.uid; streamObj.watermark = pCreate->watermark; streamObj.trigger = STREAM_TRIGGER_WINDOW_CLOSE; @@ -585,6 +584,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea return -1; } if (pAst != NULL) nodesDestroyNode(pAst); + nodesDestroyNode((SNode *)pPlan); int32_t code = -1; STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq); @@ -609,6 +609,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea code = 0; _OVER: + tFreeStreamObj(&streamObj); mndDestroySmaObj(&smaObj); mndTransDrop(pTrans); return code; @@ -839,6 +840,7 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p _OVER: mndTransDrop(pTrans); + mndReleaseStream(pMnode, pStream); mndReleaseVgroup(pMnode, pVgroup); mndReleaseStb(pMnode, pStb); return code; @@ -959,6 +961,7 @@ _OVER: mError("sma:%s, failed to drop since %s", dropReq.name, terrstr()); } + mndReleaseSma(pMnode, pSma); mndReleaseDb(pMnode, pDb); return code; } diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index 3e747b66c820828fd2f086318794ef0d3e2c88b4..dc8285740a4bdf9e0bfb04c36e780aca0f32f758 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -266,6 +266,15 @@ _OVER: return pRow; } +void mndFreeStb(SStbObj *pStb) { + taosArrayDestroy(pStb->pFuncs); + taosMemoryFreeClear(pStb->pColumns); + taosMemoryFreeClear(pStb->pTags); + taosMemoryFreeClear(pStb->comment); + taosMemoryFreeClear(pStb->pAst1); + taosMemoryFreeClear(pStb->pAst2); +} + static int32_t mndStbActionInsert(SSdb *pSdb, SStbObj *pStb) { mTrace("stb:%s, perform insert action, row:%p", pStb->name, pStb); return 0; @@ -273,12 +282,7 @@ static int32_t mndStbActionInsert(SSdb *pSdb, SStbObj *pStb) { static int32_t mndStbActionDelete(SSdb *pSdb, SStbObj *pStb) { mTrace("stb:%s, perform delete action, row:%p", pStb->name, pStb); - taosArrayDestroy(pStb->pFuncs); - taosMemoryFreeClear(pStb->pColumns); - taosMemoryFreeClear(pStb->pTags); - taosMemoryFreeClear(pStb->comment); - taosMemoryFreeClear(pStb->pAst1); - taosMemoryFreeClear(pStb->pAst2); + mndFreeStb(pStb); return 0; } @@ -438,6 +442,8 @@ static void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pSt if (req.rollup) { req.rsmaParam.maxdelay[0] = pStb->maxdelay[0]; req.rsmaParam.maxdelay[1] = pStb->maxdelay[1]; + req.rsmaParam.watermark[0] = pStb->watermark[0]; + req.rsmaParam.watermark[1] = pStb->watermark[1]; if (pStb->ast1Len > 0) { if (mndConvertRsmaTask(&req.rsmaParam.qmsg[0], &req.rsmaParam.qmsgLen[0], pStb->pAst1, pStb->uid, STREAM_TRIGGER_WINDOW_CLOSE, req.rsmaParam.watermark[0]) < 0) { @@ -530,7 +536,7 @@ int32_t mndCheckCreateStbReq(SMCreateStbReq *pCreate) { return -1; } - if (pCreate->numOfColumns < TSDB_MIN_COLUMNS || pCreate->numOfColumns > TSDB_MAX_COLUMNS) { + if (pCreate->numOfColumns < TSDB_MIN_COLUMNS || pCreate->numOfTags + pCreate->numOfColumns > TSDB_MAX_COLUMNS) { terrno = TSDB_CODE_PAR_INVALID_COLUMNS_NUM; return -1; } @@ -1145,7 +1151,7 @@ static int32_t mndAddSuperTableTag(const SStbObj *pOld, SStbObj *pNew, SArray *p return 0; } -int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t suid, col_id_t colId) { +static int32_t mndCheckAlterColForTopic(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId) { SSdb *pSdb = pMnode->pSdb; void *pIter = NULL; while (1) { @@ -1154,7 +1160,7 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t if (pIter == NULL) break; mDebug("topic:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, subType:%d sql:%s", - pTopic->name, stbname, suid, colId, pTopic->subType, pTopic->sql); + pTopic->name, stbFullName, suid, colId, pTopic->subType, pTopic->sql); if (pTopic->subType != TOPIC_SUB_TYPE__COLUMN) { sdbRelease(pSdb, pTopic); continue; @@ -1192,20 +1198,66 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t sdbRelease(pSdb, pTopic); nodesDestroyNode(pAst); } + return 0; +} +static int32_t mndCheckAlterColForStream(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId) { + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; + while (1) { + SStreamObj *pStream = NULL; + pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream); + if (pIter == NULL) break; + + SNode *pAst = NULL; + if (nodesStringToNode(pStream->ast, &pAst) != 0) { + ASSERT(0); + return -1; + } + + SNodeList *pNodeList = NULL; + nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList); + SNode *pNode = NULL; + FOREACH(pNode, pNodeList) { + SColumnNode *pCol = (SColumnNode *)pNode; + + if (pCol->tableId != suid) { + mDebug("stream:%s, check colId:%d passed", pStream->name, pCol->colId); + goto NEXT; + } + if (pCol->colId > 0 && pCol->colId == colId) { + sdbRelease(pSdb, pStream); + nodesDestroyNode(pAst); + terrno = TSDB_CODE_MND_STREAM_MUST_BE_DELETED; + mError("stream:%s, check colId:%d conflicted", pStream->name, pCol->colId); + return -1; + } + mDebug("stream:%s, check colId:%d passed", pStream->name, pCol->colId); + } + + NEXT: + sdbRelease(pSdb, pStream); + nodesDestroyNode(pAst); + } + return 0; +} + +static int32_t mndCheckAlterColForTSma(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId) { + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; while (1) { SSmaObj *pSma = NULL; pIter = sdbFetch(pSdb, SDB_SMA, pIter, (void **)&pSma); if (pIter == NULL) break; - mDebug("tsma:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, sql:%s", pSma->name, stbname, - suid, colId, pSma->sql); + mDebug("tsma:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, sql:%s", pSma->name, + stbFullName, suid, colId, pSma->sql); SNode *pAst = NULL; if (nodesStringToNode(pSma->ast, &pAst) != 0) { terrno = TSDB_CODE_SDB_INVALID_DATA_CONTENT; mError("tsma:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d failed since parse AST err", - pSma->name, stbname, suid, colId); + pSma->name, stbFullName, suid, colId); return -1; } @@ -1218,7 +1270,7 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t if ((pCol->tableId != suid) && (pSma->stbUid != suid)) { mDebug("tsma:%s, check colId:%d passed", pSma->name, pCol->colId); - goto NEXT2; + goto NEXT; } if ((pCol->colId) > 0 && (pCol->colId == colId)) { sdbRelease(pSdb, pSma); @@ -1230,11 +1282,24 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t mDebug("tsma:%s, check colId:%d passed", pSma->name, pCol->colId); } - NEXT2: + NEXT: sdbRelease(pSdb, pSma); nodesDestroyNode(pAst); } + return 0; +} + +int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId) { + if (mndCheckAlterColForTopic(pMnode, stbFullName, suid, colId) < 0) { + return -1; + } + if (mndCheckAlterColForStream(pMnode, stbFullName, suid, colId) < 0) { + return -1; + } + if (mndCheckAlterColForTSma(pMnode, stbFullName, suid, colId) < 0) { + return -1; + } return 0; } @@ -1709,6 +1774,67 @@ static int32_t mndBuildSMAlterStbRsp(SDbObj *pDb, SStbObj *pObj, void **pCont, i return 0; } +int32_t mndBuildSMCreateStbRsp(SMnode *pMnode, char* dbFName, char* stbFName, void **pCont, int32_t *pLen) { + int32_t ret = -1; + SDbObj *pDb = mndAcquireDb(pMnode, dbFName); + if (NULL == pDb) { + return -1; + } + + SStbObj *pObj = mndAcquireStb(pMnode, stbFName); + if (NULL == pObj) { + goto _OVER; + } + + SEncoder ec = {0}; + uint32_t contLen = 0; + SMCreateStbRsp stbRsp = {0}; + SName name = {0}; + tNameFromString(&name, pObj->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + + stbRsp.pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + if (NULL == stbRsp.pMeta) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto _OVER; + } + + ret = mndBuildStbSchemaImp(pDb, pObj, name.tname, stbRsp.pMeta); + if (ret) { + tFreeSMCreateStbRsp(&stbRsp); + goto _OVER; + } + + tEncodeSize(tEncodeSMCreateStbRsp, &stbRsp, contLen, ret); + if (ret) { + tFreeSMCreateStbRsp(&stbRsp); + goto _OVER; + } + + void *cont = taosMemoryMalloc(contLen); + tEncoderInit(&ec, cont, contLen); + tEncodeSMCreateStbRsp(&ec, &stbRsp); + tEncoderClear(&ec); + + tFreeSMCreateStbRsp(&stbRsp); + + *pCont = cont; + *pLen = contLen; + + ret = 0; + +_OVER: + if (pObj) { + mndReleaseStb(pMnode, pObj); + } + + if (pDb) { + mndReleaseDb(pMnode, pDb); + } + + return ret; +} + + static int32_t mndAlterStbImp(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb, bool needRsp, void *alterOriData, int32_t alterOriDataLen) { int32_t code = -1; @@ -1930,6 +2056,98 @@ _OVER: return code; } +static int32_t mndCheckDropStbForTopic(SMnode *pMnode, const char *stbFullName, int64_t suid) { + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; + while (1) { + SMqTopicObj *pTopic = NULL; + pIter = sdbFetch(pSdb, SDB_TOPIC, pIter, (void **)&pTopic); + if (pIter == NULL) break; + + if (pTopic->subType == TOPIC_SUB_TYPE__TABLE) { + if (pTopic->stbUid == suid) { + sdbRelease(pSdb, pTopic); + return -1; + } + } + + if (pTopic->subType != TOPIC_SUB_TYPE__COLUMN) { + sdbRelease(pSdb, pTopic); + continue; + } + + SNode *pAst = NULL; + if (nodesStringToNode(pTopic->ast, &pAst) != 0) { + ASSERT(0); + return -1; + } + + SNodeList *pNodeList = NULL; + nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList); + SNode *pNode = NULL; + FOREACH(pNode, pNodeList) { + SColumnNode *pCol = (SColumnNode *)pNode; + + if (pCol->tableId == suid) { + sdbRelease(pSdb, pTopic); + nodesDestroyNode(pAst); + return -1; + } else { + goto NEXT; + } + } + NEXT: + sdbRelease(pSdb, pTopic); + nodesDestroyNode(pAst); + } + return 0; +} + +static int32_t mndCheckDropStbForStream(SMnode *pMnode, const char *stbFullName, int64_t suid) { + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; + while (1) { + SStreamObj *pStream = NULL; + pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream); + if (pIter == NULL) break; + + if (pStream->smaId != 0) { + sdbRelease(pSdb, pStream); + continue; + } + + if (pStream->targetStbUid == suid) { + sdbRelease(pSdb, pStream); + return -1; + } + + SNode *pAst = NULL; + if (nodesStringToNode(pStream->ast, &pAst) != 0) { + ASSERT(0); + return -1; + } + + SNodeList *pNodeList = NULL; + nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList); + SNode *pNode = NULL; + FOREACH(pNode, pNodeList) { + SColumnNode *pCol = (SColumnNode *)pNode; + + if (pCol->tableId == suid) { + sdbRelease(pSdb, pStream); + nodesDestroyNode(pAst); + return -1; + } else { + goto NEXT; + } + } + NEXT: + sdbRelease(pSdb, pStream); + nodesDestroyNode(pAst); + } + return 0; +} + static int32_t mndProcessDropStbReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; int32_t code = -1; @@ -1971,6 +2189,16 @@ static int32_t mndProcessDropStbReq(SRpcMsg *pReq) { goto _OVER; } + if (mndCheckDropStbForTopic(pMnode, dropReq.name, pStb->uid) < 0) { + terrno = TSDB_CODE_MND_TOPIC_MUST_BE_DELETED; + goto _OVER; + } + + if (mndCheckDropStbForStream(pMnode, dropReq.name, pStb->uid) < 0) { + terrno = TSDB_CODE_MND_STREAM_MUST_BE_DELETED; + goto _OVER; + } + code = mndDropStb(pMnode, pReq, pDb, pStb); if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; @@ -1990,6 +2218,10 @@ static int32_t mndProcessTableMetaReq(SRpcMsg *pReq) { STableInfoReq infoReq = {0}; STableMetaRsp metaRsp = {0}; + SUserObj *pUser = mndAcquireUser(pMnode, pReq->info.conn.user); + if (pUser == NULL) return 0; + bool sysinfo = pUser->sysInfo; + if (tDeserializeSTableInfoReq(pReq->pCont, pReq->contLen, &infoReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; goto _OVER; @@ -1997,7 +2229,7 @@ static int32_t mndProcessTableMetaReq(SRpcMsg *pReq) { if (0 == strcmp(infoReq.dbFName, TSDB_INFORMATION_SCHEMA_DB)) { mDebug("information_schema table:%s.%s, start to retrieve meta", infoReq.dbFName, infoReq.tbName); - if (mndBuildInsTableSchema(pMnode, infoReq.dbFName, infoReq.tbName, &metaRsp) != 0) { + if (mndBuildInsTableSchema(pMnode, infoReq.dbFName, infoReq.tbName, sysinfo, &metaRsp) != 0) { goto _OVER; } } else if (0 == strcmp(infoReq.dbFName, TSDB_PERFORMANCE_SCHEMA_DB)) { @@ -2036,6 +2268,7 @@ _OVER: mError("stb:%s.%s, failed to retrieve meta since %s", infoReq.dbFName, infoReq.tbName, terrstr()); } + mndReleaseUser(pMnode, pUser); tFreeSTableMetaRsp(&metaRsp); return code; } diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 8c453e0c88de32c9fca53a606a8590f0e5f9fb31..dd7a9e71eaa634a5bda506b318c6c4472a48726b 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -167,6 +167,9 @@ static int32_t mndStreamActionInsert(SSdb *pSdb, SStreamObj *pStream) { static int32_t mndStreamActionDelete(SSdb *pSdb, SStreamObj *pStream) { mTrace("stream:%s, perform delete action", pStream->name); + taosWLockLatch(&pStream->lock); + tFreeStreamObj(pStream); + taosWUnLockLatch(&pStream->lock); return 0; } @@ -493,10 +496,17 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre stbObj.uid = pStream->targetStbUid; - if (mndAddStbToTrans(pMnode, pTrans, pDb, &stbObj) < 0) goto _OVER; + if (mndAddStbToTrans(pMnode, pTrans, pDb, &stbObj) < 0) { + mndFreeStb(&stbObj); + goto _OVER; + } + + tFreeSMCreateStbReq(&createReq); + mndFreeStb(&stbObj); return 0; _OVER: + tFreeSMCreateStbReq(&createReq); mndReleaseStb(pMnode, pStb); mndReleaseDb(pMnode, pDb); return -1; @@ -621,6 +631,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { SStreamObj *pStream = NULL; SDbObj *pDb = NULL; SCMCreateStreamReq createStreamReq = {0}; + SStreamObj streamObj = {0}; if (tDeserializeSCMCreateStreamReq(pReq->pCont, pReq->contLen, &createStreamReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; @@ -649,7 +660,6 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { } // build stream obj from request - SStreamObj streamObj = {0}; if (mndBuildStreamObjFromCreateReq(pMnode, &streamObj, &createStreamReq) < 0) { /*ASSERT(0);*/ mError("stream:%s, failed to create since %s", createStreamReq.name, terrstr()); @@ -715,6 +725,7 @@ _OVER: mndReleaseDb(pMnode, pDb); tFreeSCMCreateStreamReq(&createStreamReq); + tFreeStreamObj(&streamObj); return code; } diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 3f310ee9c09753d143e5c44e33506651c2765881..1452c5ae2fd3e9cde7cb9052d22e10bfd31afb0f 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -287,6 +287,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR if (consumerVgNum > minVgCnt) { if (imbCnt < imbConsumerNum) { if (consumerVgNum == minVgCnt + 1) { + imbCnt++; continue; } else { // pop until equal minVg + 1 @@ -356,31 +357,44 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR taosArrayPush(pConsumerEp->vgs, &pRebVg->pVgEp); pRebVg->newConsumerId = pConsumerEp->consumerId; taosArrayPush(pOutput->rebVgs, pRebVg); - mInfo("mq rebalance: add vgId:%d to consumer:%" PRId64 ",(second scan)", pRebVg->pVgEp->vgId, + mInfo("mq rebalance: add vgId:%d to consumer:%" PRId64 " (second scan) (not enough)", pRebVg->pVgEp->vgId, pConsumerEp->consumerId); } } + ASSERT(pIter == NULL); // 7. handle unassigned vg if (taosHashGetSize(pOutput->pSub->consumerHash) != 0) { // if has consumer, assign all left vg while (1) { + SMqConsumerEp *pConsumerEp = NULL; pRemovedIter = taosHashIterate(pHash, pRemovedIter); - if (pRemovedIter == NULL) break; - pIter = taosHashIterate(pOutput->pSub->consumerHash, pIter); - ASSERT(pIter); + if (pRemovedIter == NULL) { + if (pIter != NULL) { + taosHashCancelIterate(pOutput->pSub->consumerHash, pIter); + pIter = NULL; + } + break; + } + while (1) { + pIter = taosHashIterate(pOutput->pSub->consumerHash, pIter); + ASSERT(pIter); + pConsumerEp = (SMqConsumerEp *)pIter; + ASSERT(pConsumerEp->consumerId > 0); + if (taosArrayGetSize(pConsumerEp->vgs) == minVgCnt) { + break; + } + } pRebVg = (SMqRebOutputVg *)pRemovedIter; - SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)pIter; - ASSERT(pConsumerEp->consumerId > 0); taosArrayPush(pConsumerEp->vgs, &pRebVg->pVgEp); pRebVg->newConsumerId = pConsumerEp->consumerId; if (pRebVg->newConsumerId == pRebVg->oldConsumerId) { - mInfo("mq rebalance: skip vg %d for same consumer:%" PRId64 ",(second scan)", pRebVg->pVgEp->vgId, + mInfo("mq rebalance: skip vg %d for same consumer:%" PRId64 " (second scan)", pRebVg->pVgEp->vgId, pConsumerEp->consumerId); continue; } taosArrayPush(pOutput->rebVgs, pRebVg); - mInfo("mq rebalance: add vgId:%d to consumer:%" PRId64 ",(second scan)", pRebVg->pVgEp->vgId, + mInfo("mq rebalance: add vgId:%d to consumer:%" PRId64 " (second scan) (unassigned)", pRebVg->pVgEp->vgId, pConsumerEp->consumerId); } } else { @@ -571,7 +585,7 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { SMqTopicObj *pTopic = mndAcquireTopic(pMnode, topic); /*ASSERT(pTopic);*/ if (pTopic == NULL) { - mError("rebalance %s failed since topic %s was dropped, abort", pRebInfo->key, topic); + mError("mq rebalance %s failed since topic %s not exist, abort", pRebInfo->key, topic); continue; } taosRLockLatch(&pTopic->lock); @@ -601,7 +615,7 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { // TODO replace assert with error check if (mndPersistRebResult(pMnode, pMsg, &rebOutput) < 0) { - mError("persist rebalance output error, possibly vnode splitted or dropped"); + mError("mq rebalance persist rebalance output error, possibly vnode splitted or dropped"); } taosArrayDestroy(pRebInfo->lostConsumers); taosArrayDestroy(pRebInfo->newConsumers); diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index 8e8cff853c7504ffeaced773db99d22fa44a3568..e8b75e6a94e1089b037be9ec42a4fdc9deef3b3c 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -50,7 +50,7 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw); pMgmt->errCode = cbMeta.code; - mDebug("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64 + mInfo("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64 " role:%s raw:%p", transId, pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term, cbMeta.lastConfigIndex, syncStr(cbMeta.state), pRaw); @@ -88,7 +88,7 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM } int32_t mndSyncGetSnapshot(struct SSyncFSM *pFsm, SSnapshot *pSnapshot, void *pReaderParam, void **ppReader) { - mDebug("start to read snapshot from sdb in atomic way"); + mInfo("start to read snapshot from sdb in atomic way"); SMnode *pMnode = pFsm->data; return sdbStartRead(pMnode->pSdb, (SSdbIter **)ppReader, &pSnapshot->lastApplyIndex, &pSnapshot->lastApplyTerm, &pSnapshot->lastConfigIndex); @@ -136,13 +136,13 @@ void mndReConfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReConfigCbMeta cbM } int32_t mndSnapshotStartRead(struct SSyncFSM *pFsm, void *pParam, void **ppReader) { - mDebug("start to read snapshot from sdb"); + mInfo("start to read snapshot from sdb"); SMnode *pMnode = pFsm->data; return sdbStartRead(pMnode->pSdb, (SSdbIter **)ppReader, NULL, NULL, NULL); } int32_t mndSnapshotStopRead(struct SSyncFSM *pFsm, void *pReader) { - mDebug("stop to read snapshot from sdb"); + mInfo("stop to read snapshot from sdb"); SMnode *pMnode = pFsm->data; return sdbStopRead(pMnode->pSdb, pReader); } @@ -174,12 +174,12 @@ int32_t mndSnapshotDoWrite(struct SSyncFSM *pFsm, void *pWriter, void *pBuf, int void mndLeaderTransfer(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) { SMnode *pMnode = pFsm->data; atomic_store_8(&(pMnode->syncMgmt.leaderTransferFinish), 1); - mDebug("vgId:1, mnode leader transfer finish"); + mInfo("vgId:1, mnode leader transfer finish"); } static void mndBecomeFollower(struct SSyncFSM *pFsm) { SMnode *pMnode = pFsm->data; - mDebug("vgId:1, become follower and post sem"); + mInfo("vgId:1, become follower and post sem"); taosWLockLatch(&pMnode->syncMgmt.lock); if (pMnode->syncMgmt.transId != 0) { @@ -190,7 +190,7 @@ static void mndBecomeFollower(struct SSyncFSM *pFsm) { } static void mndBecomeLeader(struct SSyncFSM *pFsm) { - mDebug("vgId:1, become leader"); + mInfo("vgId:1, become leader"); SMnode *pMnode = pFsm->data; } @@ -284,7 +284,7 @@ int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId) { return -1; } else { pMgmt->transId = transId; - mDebug("trans:%d, will be proposed", pMgmt->transId); + mInfo("trans:%d, will be proposed", pMgmt->transId); taosWUnLockLatch(&pMgmt->lock); } @@ -314,7 +314,7 @@ void mndSyncStart(SMnode *pMnode) { SSyncMgmt *pMgmt = &pMnode->syncMgmt; syncSetMsgCb(pMgmt->sync, &pMnode->msgCb); syncStart(pMgmt->sync); - mDebug("mnode sync started, id:%" PRId64 " standby:%d", pMgmt->sync, pMgmt->standby); + mInfo("mnode sync started, id:%" PRId64 " standby:%d", pMgmt->sync, pMgmt->standby); } void mndSyncStop(SMnode *pMnode) { diff --git a/source/dnode/mnode/impl/src/mndTelem.c b/source/dnode/mnode/impl/src/mndTelem.c index 27814fe5bea155c54fa32789efbaf2ae30cdb29b..93f7531a272860d63351ff1a008fa11f48b5a17c 100644 --- a/source/dnode/mnode/impl/src/mndTelem.c +++ b/source/dnode/mnode/impl/src/mndTelem.c @@ -131,7 +131,9 @@ static int32_t mndProcessTelemTimer(SRpcMsg* pReq) { char* pCont = mndBuildTelemetryReport(pMnode); if (pCont != NULL) { if (taosSendHttpReport(tsTelemServer, tsTelemPort, pCont, strlen(pCont), HTTP_FLAT) != 0) { - mError("failed to send telemetry msg"); + mError("failed to send telemetry report"); + } else { + mTrace("succeed to send telemetry report"); } taosMemoryFree(pCont); } diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 820bb4b636bf5ecdebdb55ead48eb9c7ee5aeca5..eb072d013d0024e5b05a172c3c3d5d55ce41cd40 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -57,7 +57,8 @@ int32_t mndInitTopic(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_MND_CREATE_TOPIC, mndProcessCreateTopicReq); mndSetMsgHandle(pMnode, TDMT_MND_DROP_TOPIC, mndProcessDropTopicReq); mndSetMsgHandle(pMnode, TDMT_VND_DROP_TOPIC_RSP, mndTransProcessRsp); - mndSetMsgHandle(pMnode, TDMT_VND_CHECK_ALTER_INFO_RSP, mndTransProcessRsp); + mndSetMsgHandle(pMnode, TDMT_VND_ADD_CHECK_INFO_RSP, mndTransProcessRsp); + mndSetMsgHandle(pMnode, TDMT_VND_DELETE_CHECK_INFO_RSP, mndTransProcessRsp); mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_TOPICS, mndRetrieveTopic); mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_TOPICS, mndCancelGetNextTopic); @@ -450,7 +451,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq * sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); if (topicObj.ntbUid != 0) { - SCheckAlterInfo info; + STqCheckInfo info; memcpy(info.topic, topicObj.name, TSDB_TOPIC_FNAME_LEN); info.ntbUid = topicObj.ntbUid; info.colIdList = topicObj.ntbColIds; @@ -470,7 +471,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq * // encoder check alter info int32_t len; int32_t code; - tEncodeSize(tEncodeSCheckAlterInfo, &info, len, code); + tEncodeSize(tEncodeSTqCheckInfo, &info, len, code); if (code < 0) { sdbRelease(pSdb, pVgroup); mndTransDrop(pTrans); @@ -481,7 +482,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq * void *abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); SEncoder encoder; tEncoderInit(&encoder, abuf, len); - if (tEncodeSCheckAlterInfo(&encoder, &info) < 0) { + if (tEncodeSTqCheckInfo(&encoder, &info) < 0) { sdbRelease(pSdb, pVgroup); mndTransDrop(pTrans); return -1; @@ -493,7 +494,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq * action.epSet = mndGetVgroupEpset(pMnode, pVgroup); action.pCont = buf; action.contLen = sizeof(SMsgHead) + len; - action.msgType = TDMT_VND_CHECK_ALTER_INFO; + action.msgType = TDMT_VND_ADD_CHECK_INFO; if (mndTransAppendRedoAction(pTrans, &action) != 0) { taosMemoryFree(buf); sdbRelease(pSdb, pVgroup); @@ -659,12 +660,14 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { mDebug("trans:%d, used to drop topic:%s", pTrans->id, pTopic->name); +#if 0 if (mndDropOffsetByTopic(pMnode, pTrans, dropReq.name) < 0) { ASSERT(0); mndTransDrop(pTrans); mndReleaseTopic(pMnode, pTopic); return -1; } +#endif // TODO check if rebalancing if (mndDropSubByTopic(pMnode, pTrans, dropReq.name) < 0) { @@ -675,6 +678,37 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { return -1; } + if (pTopic->ntbUid != 0) { + // broadcast to all vnode + void *pIter = NULL; + SVgObj *pVgroup = NULL; + while (1) { + pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup); + if (pIter == NULL) break; + if (!mndVgroupInDb(pVgroup, pTopic->dbUid)) { + sdbRelease(pSdb, pVgroup); + continue; + } + + void *buf = taosMemoryCalloc(1, sizeof(SMsgHead) + TSDB_TOPIC_FNAME_LEN); + void *abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); + ((SMsgHead *)buf)->vgId = htonl(pVgroup->vgId); + memcpy(abuf, pTopic->name, TSDB_TOPIC_FNAME_LEN); + + STransAction action = {0}; + action.epSet = mndGetVgroupEpset(pMnode, pVgroup); + action.pCont = buf; + action.contLen = sizeof(SMsgHead) + TSDB_TOPIC_FNAME_LEN; + action.msgType = TDMT_VND_DELETE_CHECK_INFO; + if (mndTransAppendRedoAction(pTrans, &action) != 0) { + taosMemoryFree(buf); + sdbRelease(pSdb, pVgroup); + mndTransDrop(pTrans); + return -1; + } + } + } + int32_t code = mndDropTopic(pMnode, pTrans, pReq, pTopic); mndReleaseTopic(pMnode, pTopic); @@ -729,8 +763,9 @@ static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl int32_t cols = 0; char topicName[TSDB_TOPIC_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; - tNameFromString(&n, pTopic->name, T_NAME_ACCT | T_NAME_DB); - tNameGetDbName(&n, varDataVal(topicName)); + strcpy(varDataVal(topicName), mndGetDbStr(pTopic->name)); + /*tNameFromString(&n, pTopic->name, T_NAME_ACCT | T_NAME_DB);*/ + /*tNameGetDbName(&n, varDataVal(topicName));*/ varDataSetLen(topicName, strlen(varDataVal(topicName))); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)topicName, false); diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 17b4336465c23c49e71adde85a8b5291124c4f43..9c4a5afb032e6677997b7a84e919451b238b2068 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -17,6 +17,7 @@ #include "mndTrans.h" #include "mndConsumer.h" #include "mndDb.h" +#include "mndStb.h" #include "mndPrivilege.h" #include "mndShow.h" #include "mndSync.h" @@ -455,11 +456,11 @@ static const char *mndTransStr(ETrnStage stage) { } static void mndTransTestStartFunc(SMnode *pMnode, void *param, int32_t paramLen) { - mDebug("test trans start, param:%s, len:%d", (char *)param, paramLen); + mInfo("test trans start, param:%s, len:%d", (char *)param, paramLen); } static void mndTransTestStopFunc(SMnode *pMnode, void *param, int32_t paramLen) { - mDebug("test trans stop, param:%s, len:%d", (char *)param, paramLen); + mInfo("test trans stop, param:%s, len:%d", (char *)param, paramLen); } static TransCbFp mndTransGetCbFp(ETrnFunc ftype) { @@ -706,7 +707,7 @@ int32_t mndSetRpcInfoForDbTrans(SMnode *pMnode, SRpcMsg *pMsg, EOperType oper, c if (pTrans->oper == oper) { if (strcasecmp(dbname, pTrans->dbname1) == 0) { - mDebug("trans:%d, db:%s oper:%d matched with input", pTrans->id, dbname, oper); + mInfo("trans:%d, db:%s oper:%d matched with input", pTrans->id, dbname, oper); if (pTrans->pRpcArray == NULL) { pTrans->pRpcArray = taosArrayInit(1, sizeof(SRpcHandleInfo)); } @@ -745,7 +746,7 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) { } sdbSetRawStatus(pRaw, SDB_STATUS_READY); - mDebug("trans:%d, sync to other mnodes, stage:%s", pTrans->id, mndTransStr(pTrans->stage)); + mInfo("trans:%d, sync to other mnodes, stage:%s", pTrans->id, mndTransStr(pTrans->stage)); int32_t code = mndSyncPropose(pMnode, pRaw, pTrans->id); if (code != 0) { mError("trans:%d, failed to sync since %s", pTrans->id, terrstr()); @@ -754,7 +755,7 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) { } sdbFreeRaw(pRaw); - mDebug("trans:%d, sync finished", pTrans->id); + mInfo("trans:%d, sync finished", pTrans->id); return 0; } @@ -820,12 +821,12 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) { return -1; } - mDebug("trans:%d, prepare transaction", pTrans->id); + mInfo("trans:%d, prepare transaction", pTrans->id); if (mndTransSync(pMnode, pTrans) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); return -1; } - mDebug("trans:%d, prepare finished", pTrans->id); + mInfo("trans:%d, prepare finished", pTrans->id); STrans *pNew = mndAcquireTrans(pMnode, pTrans->id); if (pNew == NULL) { @@ -846,22 +847,22 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) { } static int32_t mndTransCommit(SMnode *pMnode, STrans *pTrans) { - mDebug("trans:%d, commit transaction", pTrans->id); + mInfo("trans:%d, commit transaction", pTrans->id); if (mndTransSync(pMnode, pTrans) != 0) { mError("trans:%d, failed to commit since %s", pTrans->id, terrstr()); return -1; } - mDebug("trans:%d, commit finished", pTrans->id); + mInfo("trans:%d, commit finished", pTrans->id); return 0; } static int32_t mndTransRollback(SMnode *pMnode, STrans *pTrans) { - mDebug("trans:%d, rollback transaction", pTrans->id); + mInfo("trans:%d, rollback transaction", pTrans->id); if (mndTransSync(pMnode, pTrans) != 0) { mError("trans:%d, failed to rollback since %s", pTrans->id, terrstr()); return -1; } - mDebug("trans:%d, rollback finished", pTrans->id); + mInfo("trans:%d, rollback finished", pTrans->id); return 0; } @@ -893,30 +894,21 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) { for (int32_t i = 0; i < size; ++i) { SRpcHandleInfo *pInfo = taosArrayGet(pTrans->pRpcArray, i); if (pInfo->handle != NULL) { - mDebug("trans:%d, send rsp, code:0x%x stage:%s app:%p", pTrans->id, code, mndTransStr(pTrans->stage), + mInfo("trans:%d, send rsp, code:0x%x stage:%s app:%p", pTrans->id, code, mndTransStr(pTrans->stage), pInfo->ahandle); if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { code = TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL; } SRpcMsg rspMsg = {.code = code, .info = *pInfo}; - if (pTrans->rpcRspLen != 0) { - void *rpcCont = rpcMallocCont(pTrans->rpcRspLen); - if (rpcCont != NULL) { - memcpy(rpcCont, pTrans->rpcRsp, pTrans->rpcRspLen); - rspMsg.pCont = rpcCont; - rspMsg.contLen = pTrans->rpcRspLen; - } - } - if (pTrans->originRpcType == TDMT_MND_CREATE_DB) { - mDebug("trans:%d, origin msgtype:%s", pTrans->id, TMSG_INFO(pTrans->originRpcType)); + mInfo("trans:%d, origin msgtype:%s", pTrans->id, TMSG_INFO(pTrans->originRpcType)); SDbObj *pDb = mndAcquireDb(pMnode, pTrans->dbname1); if (pDb != NULL) { for (int32_t j = 0; j < 12; j++) { bool ready = mndIsDbReady(pMnode, pDb); if (!ready) { - mDebug("trans:%d, db:%s not ready yet, wait %d times", pTrans->id, pTrans->dbname1, j); + mInfo("trans:%d, db:%s not ready yet, wait %d times", pTrans->id, pTrans->dbname1, j); taosMsleep(1000); } else { break; @@ -924,6 +916,21 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) { } } mndReleaseDb(pMnode, pDb); + } else if (pTrans->originRpcType == TDMT_MND_CREATE_STB) { + void *pCont = NULL; + int32_t contLen = 0; + if (0 == mndBuildSMCreateStbRsp(pMnode, pTrans->dbname1, pTrans->dbname2, &pCont, &contLen) != 0) { + mndTransSetRpcRsp(pTrans, pCont, contLen); + } + } + + if (pTrans->rpcRspLen != 0) { + void *rpcCont = rpcMallocCont(pTrans->rpcRspLen); + if (rpcCont != NULL) { + memcpy(rpcCont, pTrans->rpcRsp, pTrans->rpcRspLen); + rspMsg.pCont = rpcCont; + rspMsg.contLen = pTrans->rpcRspLen; + } } tmsgSendRsp(&rspMsg); @@ -971,7 +978,7 @@ int32_t mndTransProcessRsp(SRpcMsg *pRsp) { pAction->errCode = pRsp->code; } - mDebug("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x retry:0x%x", transId, + mInfo("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x retry:0x%x", transId, mndTransStr(pAction->stage), action, pRsp->code, pAction->acceptableCode, pAction->retryCode); mndTransExecute(pMnode, pTrans); @@ -987,10 +994,10 @@ static void mndTransResetAction(SMnode *pMnode, STrans *pTrans, STransAction *pA if (pAction->errCode == TSDB_CODE_RPC_REDIRECT || pAction->errCode == TSDB_CODE_SYN_NEW_CONFIG_ERROR || pAction->errCode == TSDB_CODE_SYN_INTERNAL_ERROR || pAction->errCode == TSDB_CODE_SYN_NOT_LEADER) { pAction->epSet.inUse = (pAction->epSet.inUse + 1) % pAction->epSet.numOfEps; - mDebug("trans:%d, %s:%d execute status is reset and set epset inuse:%d", pTrans->id, mndTransStr(pAction->stage), + mInfo("trans:%d, %s:%d execute status is reset and set epset inuse:%d", pTrans->id, mndTransStr(pAction->stage), pAction->id, pAction->epSet.inUse); } else { - mDebug("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), pAction->id); + mInfo("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), pAction->id); } pAction->errCode = 0; } @@ -1017,7 +1024,7 @@ static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransActi pAction->rawWritten = true; pAction->errCode = 0; code = 0; - mDebug("trans:%d, %s:%d write to sdb, type:%s status:%s", pTrans->id, mndTransStr(pAction->stage), pAction->id, + mInfo("trans:%d, %s:%d write to sdb, type:%s status:%s", pTrans->id, mndTransStr(pAction->stage), pAction->id, sdbTableName(pAction->pRaw->type), sdbStatusName(pAction->pRaw->status)); pTrans->lastAction = pAction->id; @@ -1066,7 +1073,7 @@ static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransActio pAction->msgSent = 1; pAction->msgReceived = 0; pAction->errCode = 0; - mDebug("trans:%d, %s:%d is sent, %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, detail); + mInfo("trans:%d, %s:%d is sent, %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, detail); pTrans->lastAction = pAction->id; pTrans->lastMsgType = pAction->msgType; @@ -1093,7 +1100,7 @@ static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransActio static int32_t mndTransExecNullMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction) { pAction->rawWritten = 0; pAction->errCode = 0; - mDebug("trans:%d, %s:%d confirm action executed", pTrans->id, mndTransStr(pAction->stage), pAction->id); + mInfo("trans:%d, %s:%d confirm action executed", pTrans->id, mndTransStr(pAction->stage), pAction->id); pTrans->lastAction = pAction->id; pTrans->lastMsgType = pAction->msgType; @@ -1153,7 +1160,7 @@ static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pA pTrans->lastMsgType = 0; memset(&pTrans->lastEpset, 0, sizeof(pTrans->lastEpset)); pTrans->lastErrorNo = 0; - mDebug("trans:%d, all %d actions execute successfully", pTrans->id, numOfActions); + mInfo("trans:%d, all %d actions execute successfully", pTrans->id, numOfActions); return 0; } else { mError("trans:%d, all %d actions executed, code:0x%x", pTrans->id, numOfActions, errCode & 0XFFFF); @@ -1168,7 +1175,7 @@ static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pA return errCode; } } else { - mDebug("trans:%d, %d of %d actions executed", pTrans->id, numOfExecuted, numOfActions); + mInfo("trans:%d, %d of %d actions executed", pTrans->id, numOfExecuted, numOfActions); return TSDB_CODE_ACTION_IN_PROGRESS; } } @@ -1214,7 +1221,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans) code = pAction->errCode; mndTransResetAction(pMnode, pTrans, pAction); } else { - mDebug("trans:%d, %s:%d execute successfully", pTrans->id, mndTransStr(pAction->stage), action); + mInfo("trans:%d, %s:%d execute successfully", pTrans->id, mndTransStr(pAction->stage), action); } } else { code = TSDB_CODE_ACTION_IN_PROGRESS; @@ -1223,7 +1230,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans) if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) { code = pAction->errCode; } else { - mDebug("trans:%d, %s:%d write successfully", pTrans->id, mndTransStr(pAction->stage), action); + mInfo("trans:%d, %s:%d write successfully", pTrans->id, mndTransStr(pAction->stage), action); } } else { } @@ -1247,7 +1254,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans) if (code == 0) { pTrans->code = 0; pTrans->redoActionPos++; - mDebug("trans:%d, %s:%d is executed and need sync to other mnodes", pTrans->id, mndTransStr(pAction->stage), + mInfo("trans:%d, %s:%d is executed and need sync to other mnodes", pTrans->id, mndTransStr(pAction->stage), pAction->id); code = mndTransSync(pMnode, pTrans); if (code != 0) { @@ -1256,17 +1263,17 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans) mndTransStr(pAction->stage), pAction->id, terrstr()); } } else if (code == TSDB_CODE_ACTION_IN_PROGRESS) { - mDebug("trans:%d, %s:%d is in progress and wait it finish", pTrans->id, mndTransStr(pAction->stage), pAction->id); + mInfo("trans:%d, %s:%d is in progress and wait it finish", pTrans->id, mndTransStr(pAction->stage), pAction->id); break; } else if (code == pAction->retryCode) { - mDebug("trans:%d, %s:%d receive code:0x%x and retry", pTrans->id, mndTransStr(pAction->stage), pAction->id, code); + mInfo("trans:%d, %s:%d receive code:0x%x and retry", pTrans->id, mndTransStr(pAction->stage), pAction->id, code); taosMsleep(300); action--; continue; } else { terrno = code; pTrans->code = code; - mDebug("trans:%d, %s:%d receive code:0x%x and wait another schedule, failedTimes:%d", pTrans->id, + mInfo("trans:%d, %s:%d receive code:0x%x and wait another schedule, failedTimes:%d", pTrans->id, mndTransStr(pAction->stage), pAction->id, code, pTrans->failedTimes); break; } @@ -1278,7 +1285,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans) static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) { bool continueExec = true; pTrans->stage = TRN_STAGE_REDO_ACTION; - mDebug("trans:%d, stage from prepare to redoAction", pTrans->id); + mInfo("trans:%d, stage from prepare to redoAction", pTrans->id); return continueExec; } @@ -1297,10 +1304,10 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) { if (code == 0) { pTrans->code = 0; pTrans->stage = TRN_STAGE_COMMIT; - mDebug("trans:%d, stage from redoAction to commit", pTrans->id); + mInfo("trans:%d, stage from redoAction to commit", pTrans->id); continueExec = true; } else if (code == TSDB_CODE_ACTION_IN_PROGRESS) { - mDebug("trans:%d, stage keep on redoAction since %s", pTrans->id, tstrerror(code)); + mInfo("trans:%d, stage keep on redoAction since %s", pTrans->id, tstrerror(code)); continueExec = false; } else { pTrans->failedTimes++; @@ -1308,7 +1315,7 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) { if (pTrans->policy == TRN_POLICY_ROLLBACK) { if (pTrans->lastAction != 0) { STransAction *pAction = taosArrayGet(pTrans->redoActions, pTrans->lastAction); - if (pAction->retryCode != 0 && pAction->retryCode != pAction->errCode) { + if (pAction->retryCode != 0 && pAction->retryCode == pAction->errCode) { if (pTrans->failedTimes < 6) { mError("trans:%d, stage keep on redoAction since action:%d code:0x%x not 0x%x, failedTimes:%d", pTrans->id, pTrans->lastAction, pTrans->code, pAction->retryCode, pTrans->failedTimes); @@ -1340,7 +1347,7 @@ static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans) { if (code == 0) { pTrans->code = 0; pTrans->stage = TRN_STAGE_COMMIT_ACTION; - mDebug("trans:%d, stage from commit to commitAction", pTrans->id); + mInfo("trans:%d, stage from commit to commitAction", pTrans->id); continueExec = true; } else { pTrans->code = terrno; @@ -1359,7 +1366,7 @@ static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans) { if (code == 0) { pTrans->code = 0; pTrans->stage = TRN_STAGE_FINISHED; - mDebug("trans:%d, stage from commitAction to finished", pTrans->id); + mInfo("trans:%d, stage from commitAction to finished", pTrans->id); continueExec = true; } else { pTrans->code = terrno; @@ -1377,10 +1384,10 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans) { if (code == 0) { pTrans->stage = TRN_STAGE_FINISHED; - mDebug("trans:%d, stage from undoAction to finished", pTrans->id); + mInfo("trans:%d, stage from undoAction to finished", pTrans->id); continueExec = true; } else if (code == TSDB_CODE_ACTION_IN_PROGRESS) { - mDebug("trans:%d, stage keep on undoAction since %s", pTrans->id, tstrerror(code)); + mInfo("trans:%d, stage keep on undoAction since %s", pTrans->id, tstrerror(code)); continueExec = false; } else { pTrans->failedTimes++; @@ -1399,7 +1406,7 @@ static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans) { if (code == 0) { pTrans->stage = TRN_STAGE_UNDO_ACTION; - mDebug("trans:%d, stage from rollback to undoAction", pTrans->id); + mInfo("trans:%d, stage from rollback to undoAction", pTrans->id); continueExec = true; } else { pTrans->failedTimes++; @@ -1424,7 +1431,7 @@ static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans) { mError("trans:%d, failed to write sdb since %s", pTrans->id, terrstr()); } - mDebug("trans:%d, execute finished, code:0x%x, failedTimes:%d", pTrans->id, pTrans->code, pTrans->failedTimes); + mInfo("trans:%d, execute finished, code:0x%x, failedTimes:%d", pTrans->id, pTrans->code, pTrans->failedTimes); return continueExec; } @@ -1432,7 +1439,7 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans) { bool continueExec = true; while (continueExec) { - mDebug("trans:%d, continue to execute, stage:%s", pTrans->id, mndTransStr(pTrans->stage)); + mInfo("trans:%d, continue to execute, stage:%s", pTrans->id, mndTransStr(pTrans->stage)); pTrans->lastExecTime = taosGetTimestampMs(); switch (pTrans->stage) { case TRN_STAGE_PREPARE: diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index 0567ec4e1425a1815af45448505a47cfada71259..09eed7fb32e8831e6b6c863b44edd3e9e28110a3 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -509,6 +509,7 @@ int32_t mndAllocSmaVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup) { pVgroup->replica = 1; if (mndGetAvailableDnode(pMnode, pDb, pVgroup, pArray) != 0) return -1; + taosArrayDestroy(pArray); mInfo("db:%s, sma vgId:%d is alloced", pDb->name, pVgroup->vgId); return 0; @@ -1862,4 +1863,4 @@ _OVER: #endif } -bool mndVgroupInDb(SVgObj *pVgroup, int64_t dbUid) { return !pVgroup->isTsma && pVgroup->dbUid == dbUid; } \ No newline at end of file +bool mndVgroupInDb(SVgObj *pVgroup, int64_t dbUid) { return !pVgroup->isTsma && pVgroup->dbUid == dbUid; } diff --git a/source/dnode/mnode/impl/test/sma/CMakeLists.txt b/source/dnode/mnode/impl/test/sma/CMakeLists.txt index 3f9ec123a80e88371a98fa54c99342726831372d..a55b45ca11d32f4aa0baa2462007f06e970ae3d6 100644 --- a/source/dnode/mnode/impl/test/sma/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/sma/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME smaTest - COMMAND smaTest -) +if(NOT ${TD_WINDOWS}) + add_test( + NAME smaTest + COMMAND smaTest + ) +endif(NOT ${TD_WINDOWS}) diff --git a/source/dnode/mnode/impl/test/stb/CMakeLists.txt b/source/dnode/mnode/impl/test/stb/CMakeLists.txt index dcfbe658fcca82f928400b1e9eed2efcfb09a052..e3a3fc2e793fa84a5da05519ae727bb572edaa27 100644 --- a/source/dnode/mnode/impl/test/stb/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/stb/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME stbTest - COMMAND stbTest -) \ No newline at end of file +if(NOT ${TD_WINDOWS}) + add_test( + NAME stbTest + COMMAND stbTest + ) +endif(NOT ${TD_WINDOWS}) \ No newline at end of file diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index b218d982e9e37f0315bde21ff4c29fdee9154cc3..a3e17f53774c82ea9fca1ff0a88943c8e7971725 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -24,6 +24,7 @@ target_sources( "src/meta/metaCommit.c" "src/meta/metaEntry.c" "src/meta/metaSnapshot.c" + "src/meta/metaCache.c" # sma "src/sma/smaEnv.c" diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 18a7583f4c6eaa4d5c10b64a8cfeda272bc4ff24..5d4285b7c25e645dfccf18529cfd2173afa312cc 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -63,6 +63,7 @@ void vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId); int32_t vnodeProcessCreateTSma(SVnode *pVnode, void *pCont, uint32_t contLen); int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list); int32_t vnodeGetCtbIdList(SVnode *pVnode, int64_t suid, SArray *list); +int32_t vnodeGetStbIdList(SVnode *pVnode, int64_t suid, SArray *list); void *vnodeGetIdx(SVnode *pVnode); void *vnodeGetIvtIdx(SVnode *pVnode); @@ -91,9 +92,11 @@ typedef struct SMetaEntry SMetaEntry; void metaReaderInit(SMetaReader *pReader, SMeta *pMeta, int32_t flags); void metaReaderClear(SMetaReader *pReader); int32_t metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid); +int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags); int32_t metaReadNext(SMetaReader *pReader); -const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *tagVal); +const void *metaGetTableTagVal(void *tag, int16_t type, STagVal *tagVal); int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName); +bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid); typedef struct SMetaFltParam { tb_uid_t suid; @@ -125,33 +128,60 @@ typedef struct STsdbReader STsdbReader; #define TIMEWINDOW_RANGE_CONTAINED 1 #define TIMEWINDOW_RANGE_EXTERNAL 2 -#define LASTROW_RETRIEVE_TYPE_ALL 0x1 -#define LASTROW_RETRIEVE_TYPE_SINGLE 0x2 - -int32_t tsdbSetTableId(STsdbReader *pReader, int64_t uid); -int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, SArray *pTableList, STsdbReader **ppReader, - const char *idstr); -void tsdbReaderClose(STsdbReader *pReader); -bool tsdbNextDataBlock(STsdbReader *pReader); -void tsdbRetrieveDataBlockInfo(STsdbReader *pReader, SDataBlockInfo *pDataBlockInfo); -int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SColumnDataAgg ***pBlockStatis, bool *allHave); -SArray *tsdbRetrieveDataBlock(STsdbReader *pTsdbReadHandle, SArray *pColumnIdList); -int32_t tsdbReaderReset(STsdbReader *pReader, SQueryTableDataCond *pCond); -int32_t tsdbGetFileBlocksDistInfo(STsdbReader *pReader, STableBlockDistInfo *pTableBlockInfo); -int64_t tsdbGetNumOfRowsInMemTable(STsdbReader *pHandle); -void *tsdbGetIdx(SMeta *pMeta); -void *tsdbGetIvtIdx(SMeta *pMeta); +#define CACHESCAN_RETRIEVE_TYPE_ALL 0x1 +#define CACHESCAN_RETRIEVE_TYPE_SINGLE 0x2 +#define CACHESCAN_RETRIEVE_LAST_ROW 0x4 +#define CACHESCAN_RETRIEVE_LAST 0x8 + +int32_t tsdbSetTableId(STsdbReader *pReader, int64_t uid); +int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, SArray *pTableList, STsdbReader **ppReader, + const char *idstr); +void tsdbReaderClose(STsdbReader *pReader); +bool tsdbNextDataBlock(STsdbReader *pReader); +void tsdbRetrieveDataBlockInfo(STsdbReader *pReader, SDataBlockInfo *pDataBlockInfo); +int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SColumnDataAgg ***pBlockStatis, bool *allHave); +SArray *tsdbRetrieveDataBlock(STsdbReader *pTsdbReadHandle, SArray *pColumnIdList); +int32_t tsdbReaderReset(STsdbReader *pReader, SQueryTableDataCond *pCond); +int32_t tsdbGetFileBlocksDistInfo(STsdbReader *pReader, STableBlockDistInfo *pTableBlockInfo); +int64_t tsdbGetNumOfRowsInMemTable(STsdbReader *pHandle); +void *tsdbGetIdx(SMeta *pMeta); +void *tsdbGetIvtIdx(SMeta *pMeta); uint64_t getReaderMaxVersion(STsdbReader *pReader); -int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader); -int32_t tsdbRetrieveLastRow(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids); -int32_t tsdbLastrowReaderClose(void *pReader); +int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader); +int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids); +int32_t tsdbCacherowsReaderClose(void *pReader); int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid); void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity); size_t tsdbCacheGetCapacity(SVnode *pVnode); // tq +typedef struct SMetaTableInfo { + int64_t suid; + int64_t uid; + SSchemaWrapper *schema; + char tbName[TSDB_TABLE_NAME_LEN]; +} SMetaTableInfo; + +typedef struct SIdInfo { + int64_t version; + int32_t index; +} SIdInfo; + +typedef struct SSnapContext { + SMeta *pMeta; + int64_t snapVersion; + TBC *pCur; + int64_t suid; + int8_t subType; + SHashObj *idVersion; + SHashObj *suidInfo; + SArray *idList; + int32_t index; + bool withMeta; + bool queryMetaOrData; // true-get meta, false-get data +} SSnapContext; typedef struct STqReader { int64_t ver; @@ -202,6 +232,13 @@ int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWr int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *pSnapshot); int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData); +int32_t buildSnapContext(SMeta *pMeta, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta, + SSnapContext **ctxRet); +int32_t getMetafromSnapShot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid); +SMetaTableInfo getUidfromSnapShot(SSnapContext *ctx); +int32_t setForSnapShot(SSnapContext *ctx, int64_t uid); +int32_t destroySnapContext(SSnapContext *ctx); + // structs struct STsdbCfg { int8_t precision; @@ -221,7 +258,9 @@ typedef struct { int64_t numOfSTables; int64_t numOfCTables; int64_t numOfNTables; + int64_t numOfNTimeSeries; int64_t numOfTimeSeries; + int64_t itvTimeSeries; int64_t pointsWritten; int64_t totalStorage; int64_t compStorage; diff --git a/source/dnode/vnode/src/inc/meta.h b/source/dnode/vnode/src/inc/meta.h index a72546fe86026288109f692315f850d7f5852997..adfbb919206a184664655cc11746c1d25c14147b 100644 --- a/source/dnode/vnode/src/inc/meta.h +++ b/source/dnode/vnode/src/inc/meta.h @@ -23,8 +23,9 @@ extern "C" { #endif -typedef struct SMetaIdx SMetaIdx; -typedef struct SMetaDB SMetaDB; +typedef struct SMetaIdx SMetaIdx; +typedef struct SMetaDB SMetaDB; +typedef struct SMetaCache SMetaCache; // metaDebug ================== // clang-format off @@ -60,6 +61,12 @@ static FORCE_INLINE tb_uid_t metaGenerateUid(SMeta* pMeta) { return tGenIdPI64() // metaTable ================== int metaHandleEntry(SMeta* pMeta, const SMetaEntry* pME); +// metaCache ================== +int32_t metaCacheOpen(SMeta* pMeta); +void metaCacheClose(SMeta* pMeta); +int32_t metaCacheUpsert(SMeta* pMeta, SMetaInfo* pInfo); +int32_t metaCacheDrop(SMeta* pMeta, int64_t uid); + struct SMeta { TdThreadRwlock lock; @@ -84,6 +91,8 @@ struct SMeta { TTB* pStreamDb; SMetaIdx* pIdx; + + SMetaCache* pCache; }; typedef struct { @@ -92,6 +101,12 @@ typedef struct { } STbDbKey; #pragma pack(push, 1) +typedef struct { + tb_uid_t suid; + int64_t version; + int32_t skmVer; +} SUidIdxVal; + typedef struct { tb_uid_t uid; int32_t sver; diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h index 944d7759b28c41a901d7ed0f666eca2ffef30b6a..c29c4cb6c4e84db96ba5d419b17da97b86169ebe 100644 --- a/source/dnode/vnode/src/inc/sma.h +++ b/source/dnode/vnode/src/inc/sma.h @@ -32,7 +32,7 @@ extern "C" { #define smaTrace(...) do { if (smaDebugFlag & DEBUG_TRACE) { taosPrintLog("SMA ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0) // clang-format on -#define RSMA_TASK_INFO_HASH_SLOT 8 +#define RSMA_TASK_INFO_HASH_SLOT (8) typedef struct SSmaEnv SSmaEnv; typedef struct SSmaStat SSmaStat; @@ -48,18 +48,22 @@ typedef struct SQTaskFWriter SQTaskFWriter; struct SSmaEnv { SRWLatch lock; int8_t type; + int8_t flag; // 0x01 inClose SSmaStat *pStat; }; +#define SMA_ENV_FLG_CLOSE ((int8_t)0x1) + typedef struct { int8_t inited; int32_t rsetId; void *tmrHandle; // shared by all fetch tasks } SSmaMgmt; -#define SMA_ENV_LOCK(env) (&(env)->lock) -#define SMA_ENV_TYPE(env) ((env)->type) -#define SMA_ENV_STAT(env) ((env)->pStat) +#define SMA_ENV_LOCK(env) (&(env)->lock) +#define SMA_ENV_TYPE(env) ((env)->type) +#define SMA_ENV_STAT(env) ((env)->pStat) +#define SMA_RSMA_STAT(sma) ((SRSmaStat *)SMA_ENV_STAT((SSmaEnv *)(sma)->pRSmaEnv)) struct STSmaStat { int8_t state; // ETsdbSmaStat @@ -86,15 +90,17 @@ struct SQTaskFWriter { }; struct SRSmaStat { - SSma *pSma; - int64_t commitAppliedVer; // vnode applied version for async commit - int64_t refId; // shared by fetch tasks - SRWLatch lock; // r/w lock for rsma fs(e.g. qtaskinfo) - int8_t triggerStat; // shared by fetch tasks - int8_t commitStat; // 0 not in committing, 1 in committing - SArray *aTaskFile; // qTaskFiles committed recently(for recovery/snapshot r/w) - SHashObj *rsmaInfoHash; // key: stbUid, value: SRSmaInfo; - SHashObj *iRsmaInfoHash; // key: stbUid, value: SRSmaInfo; immutable rsmaInfoHash + SSma *pSma; + int64_t commitAppliedVer; // vnode applied version for async commit + int64_t refId; // shared by fetch tasks + volatile int64_t nBufItems; // number of items in queue buffer + SRWLatch lock; // r/w lock for rsma fs(e.g. qtaskinfo) + volatile int32_t nFetchAll; // active number of fetch all + int8_t triggerStat; // shared by fetch tasks + int8_t commitStat; // 0 not in committing, 1 in committing + SArray *aTaskFile; // qTaskFiles committed recently(for recovery/snapshot r/w) + SHashObj *infoHash; // key: suid, value: SRSmaInfo + tsem_t notEmpty; // has items in queue buffer }; struct SSmaStat { @@ -103,36 +109,45 @@ struct SSmaStat { SRSmaStat rsmaStat; // rollup sma }; T_REF_DECLARE() + char data[]; }; -#define SMA_TSMA_STAT(s) (&(s)->tsmaStat) -#define SMA_RSMA_STAT(s) (&(s)->rsmaStat) -#define RSMA_INFO_HASH(r) ((r)->rsmaInfoHash) -#define RSMA_IMU_INFO_HASH(r) ((r)->iRsmaInfoHash) -#define RSMA_TRIGGER_STAT(r) (&(r)->triggerStat) -#define RSMA_COMMIT_STAT(r) (&(r)->commitStat) -#define RSMA_REF_ID(r) ((r)->refId) -#define RSMA_FS_LOCK(r) (&(r)->lock) +#define SMA_STAT_TSMA(s) (&(s)->tsmaStat) +#define SMA_STAT_RSMA(s) (&(s)->rsmaStat) +#define RSMA_INFO_HASH(r) ((r)->infoHash) +#define RSMA_TRIGGER_STAT(r) (&(r)->triggerStat) +#define RSMA_COMMIT_STAT(r) (&(r)->commitStat) +#define RSMA_REF_ID(r) ((r)->refId) +#define RSMA_FS_LOCK(r) (&(r)->lock) struct SRSmaInfoItem { - int8_t level; - int8_t triggerStat; - int32_t maxDelay; - tmr_h tmrId; + int8_t level : 4; + int8_t fetchLevel : 4; + int8_t triggerStat; + uint16_t nSkipped; + int32_t maxDelay; // ms + tmr_h tmrId; }; struct SRSmaInfo { STSchema *pTSchema; int64_t suid; - int64_t refId; // refId of SRSmaStat + int64_t refId; // refId of SRSmaStat + int64_t lastRecv; // ms + int8_t assigned; // 0 idle, 1 assgined for exec int8_t delFlag; + int16_t padding; T_REF_DECLARE() SRSmaInfoItem items[TSDB_RETENTION_L2]; void *taskInfo[TSDB_RETENTION_L2]; // qTaskInfo_t - void *iTaskInfo[TSDB_RETENTION_L2]; // immutable + STaosQueue *queue; // buffer queue of SubmitReq + STaosQall *qall; // buffer qall of SubmitReq + void *iTaskInfo[TSDB_RETENTION_L2]; // immutable qTaskInfo_t + STaosQueue *iQueue; // immutable buffer queue of SubmitReq + STaosQall *iQall; // immutable buffer qall of SubmitReq }; -#define RSMA_INFO_HEAD_LEN 32 +#define RSMA_INFO_HEAD_LEN offsetof(SRSmaInfo, items) #define RSMA_INFO_IS_DEL(r) ((r)->delFlag == 1) #define RSMA_INFO_SET_DEL(r) ((r)->delFlag = 1) #define RSMA_INFO_QTASK(r, i) ((r)->taskInfo[i]) @@ -161,6 +176,12 @@ enum { RSMA_RESTORE_SYNC = 2, }; +typedef enum { + RSMA_EXEC_OVERFLOW = 1, // triggered by queue buf overflow + RSMA_EXEC_TIMEOUT = 2, // triggered by timer + RSMA_EXEC_COMMIT = 3, // triggered by commit +} ERsmaExecType; + void tdDestroySmaEnv(SSmaEnv *pSmaEnv); void *tdFreeSmaEnv(SSmaEnv *pSmaEnv); @@ -228,12 +249,13 @@ static FORCE_INLINE void tdSmaStatSetDropped(STSmaStat *pTStat) { void tdRSmaQTaskInfoGetFileName(int32_t vid, int64_t version, char *outputName); void tdRSmaQTaskInfoGetFullName(int32_t vid, int64_t version, const char *path, char *outputName); -int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo **pDest, SRSmaInfo *pSrc); +int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pInfo); void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level); static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType); void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType); void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree); int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash); +int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type); int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName); int32_t tdProcessRSmaRestoreImpl(SSma *pSma, int8_t type, int64_t qtaskFileVer); diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index a1dba41c941f9baf8a28304b80eb12530e929e9d..19dd321814eee08fff0ad8e296b6e72d78bd4c6a 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -67,8 +67,7 @@ typedef struct { // tqExec typedef struct { - char* qmsg; - qTaskInfo_t task; + char* qmsg; } STqExecCol; typedef struct { @@ -82,7 +81,8 @@ typedef struct { typedef struct { int8_t subType; - STqReader* pExecReader; + STqReader* pExecReader; + qTaskInfo_t task; union { STqExecCol execCol; STqExecTb execTb; @@ -101,7 +101,6 @@ typedef struct { int64_t snapshotVer; - // TODO remove SWalReader* pWalReader; SWalRef* pRef; @@ -117,16 +116,15 @@ typedef struct { struct STQ { SVnode* pVnode; char* path; - SHashObj* pushMgr; // consumerId -> STqHandle* - SHashObj* handles; // subKey -> STqHandle - SHashObj* pAlterInfo; // topic -> SAlterCheckInfo + SHashObj* pPushMgr; // consumerId -> STqHandle* + SHashObj* pHandle; // subKey -> STqHandle + SHashObj* pCheckInfo; // topic -> SAlterCheckInfo STqOffsetStore* pOffsetStore; - TDB* pMetaStore; + TDB* pMetaDB; TTB* pExecStore; - - TTB* pAlterInfoStore; + TTB* pCheckStore; SStreamMeta* pStreamMeta; }; @@ -142,11 +140,11 @@ int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle); int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle); // tqRead -int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* offset); +int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* offset); int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum); // tqExec -int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataRsp* pRsp); +int32_t tqLogScanExec(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, SMqDataRsp* pRsp); int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataRsp* pRsp); // tqMeta @@ -155,6 +153,9 @@ int32_t tqMetaClose(STQ* pTq); int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle); int32_t tqMetaDeleteHandle(STQ* pTq, const char* key); int32_t tqMetaRestoreHandle(STQ* pTq); +int32_t tqMetaSaveCheckInfo(STQ* pTq, const char* key, const void* value, int32_t vLen); +int32_t tqMetaDeleteCheckInfo(STQ* pTq, const char* key); +int32_t tqMetaRestoreCheckInfo(STQ* pTq); typedef struct { int32_t size; @@ -174,17 +175,6 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data); char* tqOffsetBuildFName(const char* path, int32_t ver); int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname); -static FORCE_INLINE void tqOffsetResetToData(STqOffsetVal* pOffsetVal, int64_t uid, int64_t ts) { - pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_DATA; - pOffsetVal->uid = uid; - pOffsetVal->ts = ts; -} - -static FORCE_INLINE void tqOffsetResetToLog(STqOffsetVal* pOffsetVal, int64_t ver) { - pOffsetVal->type = TMQ_OFFSET__LOG; - pOffsetVal->version = ver; -} - // tqStream int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask); diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index f1e980c026fc55249171ec08bec01227bb8b132b..d1f5cfb122d6fdfee2cb8f54911a07a25cbb078c 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -45,7 +45,7 @@ typedef struct SBlockIdx SBlockIdx; typedef struct SBlock SBlock; typedef struct SBlockL SBlockL; typedef struct SColData SColData; -typedef struct SBlockDataHdr SBlockDataHdr; +typedef struct SDiskDataHdr SDiskDataHdr; typedef struct SBlockData SBlockData; typedef struct SDelFile SDelFile; typedef struct SHeadFile SHeadFile; @@ -61,7 +61,11 @@ typedef struct SRowIter SRowIter; typedef struct STsdbFS STsdbFS; typedef struct SRowMerger SRowMerger; typedef struct STsdbReadSnap STsdbReadSnap; +typedef struct SBlockInfo SBlockInfo; +typedef struct SSmaInfo SSmaInfo; +typedef struct SBlockCol SBlockCol; +#define TSDB_FILE_DLMT ((uint32_t)0xF00AFA0F) #define TSDB_MAX_SUBBLOCKS 8 #define TSDB_FHDR_SIZE 512 @@ -113,10 +117,14 @@ int32_t tPutBlock(uint8_t *p, void *ph); int32_t tGetBlock(uint8_t *p, void *ph); int32_t tBlockCmprFn(const void *p1, const void *p2); bool tBlockHasSma(SBlock *pBlock); +// SBlockL +int32_t tPutBlockL(uint8_t *p, void *ph); +int32_t tGetBlockL(uint8_t *p, void *ph); // SBlockIdx int32_t tPutBlockIdx(uint8_t *p, void *ph); int32_t tGetBlockIdx(uint8_t *p, void *ph); int32_t tCmprBlockIdx(void const *lhs, void const *rhs); +int32_t tCmprBlockL(void const *lhs, void const *rhs); // SColdata void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn); void tColDataReset(SColData *pColData); @@ -131,20 +139,25 @@ int32_t tGetColData(uint8_t *p, SColData *pColData); #define tBlockDataLastRow(PBLOCKDATA) tsdbRowFromBlockData(PBLOCKDATA, (PBLOCKDATA)->nRow - 1) #define tBlockDataFirstKey(PBLOCKDATA) TSDBROW_KEY(&tBlockDataFirstRow(PBLOCKDATA)) #define tBlockDataLastKey(PBLOCKDATA) TSDBROW_KEY(&tBlockDataLastRow(PBLOCKDATA)) -int32_t tBlockDataInit(SBlockData *pBlockData); + +int32_t tBlockDataCreate(SBlockData *pBlockData); +void tBlockDataDestroy(SBlockData *pBlockData, int8_t deepClear); +int32_t tBlockDataInit(SBlockData *pBlockData, int64_t suid, int64_t uid, STSchema *pTSchema); +int32_t tBlockDataInitEx(SBlockData *pBlockData, SBlockData *pBlockDataFrom); void tBlockDataReset(SBlockData *pBlockData); -int32_t tBlockDataSetSchema(SBlockData *pBlockData, STSchema *pTSchema); -int32_t tBlockDataCorrectSchema(SBlockData *pBlockData, SBlockData *pBlockDataFrom); -void tBlockDataClearData(SBlockData *pBlockData); -void tBlockDataClear(SBlockData *pBlockData, int8_t deepClear); -int32_t tBlockDataAddColData(SBlockData *pBlockData, int32_t iColData, SColData **ppColData); -int32_t tBlockDataAppendRow(SBlockData *pBlockData, TSDBROW *pRow, STSchema *pTSchema); -int32_t tBlockDataMerge(SBlockData *pBlockData1, SBlockData *pBlockData2, SBlockData *pBlockData); -int32_t tBlockDataCopy(SBlockData *pBlockDataSrc, SBlockData *pBlockDataDest); +int32_t tBlockDataAppendRow(SBlockData *pBlockData, TSDBROW *pRow, STSchema *pTSchema, int64_t uid); +void tBlockDataClear(SBlockData *pBlockData); SColData *tBlockDataGetColDataByIdx(SBlockData *pBlockData, int32_t idx); void tBlockDataGetColData(SBlockData *pBlockData, int16_t cid, SColData **ppColData); -int32_t tPutBlockData(uint8_t *p, SBlockData *pBlockData); -int32_t tGetBlockData(uint8_t *p, SBlockData *pBlockData); +int32_t tBlockDataCopy(SBlockData *pBlockDataSrc, SBlockData *pBlockDataDest); +int32_t tBlockDataMerge(SBlockData *pBlockData1, SBlockData *pBlockData2, SBlockData *pBlockData); +int32_t tBlockDataAddColData(SBlockData *pBlockData, int32_t iColData, SColData **ppColData); +int32_t tCmprBlockData(SBlockData *pBlockData, int8_t cmprAlg, uint8_t **ppOut, int32_t *szOut, uint8_t *aBuf[], + int32_t aBufN[]); +int32_t tDecmprBlockData(uint8_t *pIn, int32_t szIn, SBlockData *pBlockData, uint8_t *aBuf[]); +// SDiskDataHdr +int32_t tPutDiskDataHdr(uint8_t *p, void *ph); +int32_t tGetDiskDataHdr(uint8_t *p, void *ph); // SDelIdx int32_t tPutDelIdx(uint8_t *p, void *ph); int32_t tGetDelIdx(uint8_t *p, void *ph); @@ -168,13 +181,25 @@ void tsdbFidKeyRange(int32_t fid, int32_t minutes, int8_t precision, TSKEY *m int32_t tsdbFidLevel(int32_t fid, STsdbKeepCfg *pKeepCfg, int64_t now); int32_t tsdbBuildDeleteSkyline(SArray *aDelData, int32_t sidx, int32_t eidx, SArray *aSkyline); void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg); +int32_t tPutColumnDataAgg(uint8_t *p, SColumnDataAgg *pColAgg); +int32_t tGetColumnDataAgg(uint8_t *p, SColumnDataAgg *pColAgg); +int32_t tsdbCmprData(uint8_t *pIn, int32_t szIn, int8_t type, int8_t cmprAlg, uint8_t **ppOut, int32_t nOut, + int32_t *szOut, uint8_t **ppBuf); +int32_t tsdbDecmprData(uint8_t *pIn, int32_t szIn, int8_t type, int8_t cmprAlg, uint8_t **ppOut, int32_t szOut, + uint8_t **ppBuf); +int32_t tsdbCmprColData(SColData *pColData, int8_t cmprAlg, SBlockCol *pBlockCol, uint8_t **ppOut, int32_t nOut, + uint8_t **ppBuf); +int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, int32_t nVal, SColData *pColData, + uint8_t **ppBuf); +int32_t tsdbReadAndCheck(TdFilePtr pFD, int64_t offset, uint8_t **ppOut, int32_t size, int8_t toCheck); // tsdbMemTable ============================================================================================== // SMemTable -int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable); -void tsdbMemTableDestroy(SMemTable *pMemTable); -void tsdbGetTbDataFromMemTable(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid, STbData **ppTbData); -void tsdbRefMemTable(SMemTable *pMemTable); -void tsdbUnrefMemTable(SMemTable *pMemTable); +int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable); +void tsdbMemTableDestroy(SMemTable *pMemTable); +STbData *tsdbGetTbDataFromMemTable(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid); +void tsdbRefMemTable(SMemTable *pMemTable); +void tsdbUnrefMemTable(SMemTable *pMemTable); +SArray *tsdbMemTableGetTbDataArray(SMemTable *pMemTable); // STbDataIter int32_t tsdbTbDataIterCreate(STbData *pTbData, TSDBKEY *pFrom, int8_t backward, STbDataIter **ppIter); void *tsdbTbDataIterDestroy(STbDataIter *pIter); @@ -223,33 +248,33 @@ int32_t tsdbFSUpsertDelFile(STsdbFS *pFS, SDelFile *pDelFile); int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pSet); int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, int8_t sync); int32_t tsdbUpdateDFileSetHeader(SDataFWriter *pWriter); -int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx, uint8_t **ppBuf); -int32_t tsdbWriteBlock(SDataFWriter *pWriter, SMapData *pMapData, uint8_t **ppBuf, SBlockIdx *pBlockIdx); -int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, uint8_t **ppBuf1, uint8_t **ppBuf2, - SBlockIdx *pBlockIdx, SBlock *pBlock, int8_t cmprAlg); +int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx); +int32_t tsdbWriteBlock(SDataFWriter *pWriter, SMapData *pMapData, SBlockIdx *pBlockIdx); +int32_t tsdbWriteBlockL(SDataFWriter *pWriter, SArray *aBlockL); +int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo, + int8_t cmprAlg, int8_t toLast); int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo); // SDataFReader int32_t tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pSet); int32_t tsdbDataFReaderClose(SDataFReader **ppReader); -int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx, uint8_t **ppBuf); -int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *pMapData, uint8_t **ppBuf); -int32_t tsdbReadColData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *pBlock, int16_t *aColId, int32_t nCol, - SBlockData *pBlockData, uint8_t **ppBuf1, uint8_t **ppBuf2); -int32_t tsdbReadBlockData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *pBlock, SBlockData *pBlockData, - uint8_t **ppBuf1, uint8_t **ppBuf2); -int32_t tsdbReadBlockSma(SDataFReader *pReader, SBlock *pBlock, SArray *aColumnDataAgg, uint8_t **ppBuf); +int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx); +int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *pMapData); +int32_t tsdbReadBlockL(SDataFReader *pReader, SArray *aBlockL); +int32_t tsdbReadBlockSma(SDataFReader *pReader, SBlock *pBlock, SArray *aColumnDataAgg); +int32_t tsdbReadDataBlock(SDataFReader *pReader, SBlock *pBlock, SBlockData *pBlockData); +int32_t tsdbReadLastBlock(SDataFReader *pReader, SBlockL *pBlockL, SBlockData *pBlockData); // SDelFWriter int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb); int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync); -int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, uint8_t **ppBuf, SDelIdx *pDelIdx); -int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx, uint8_t **ppBuf); +int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, SDelIdx *pDelIdx); +int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx); int32_t tsdbUpdateDelFileHdr(SDelFWriter *pWriter); // SDelFReader -int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb, uint8_t **ppBuf); +int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb); int32_t tsdbDelFReaderClose(SDelFReader **ppReader); -int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData, uint8_t **ppBuf); -int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx, uint8_t **ppBuf); +int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData); +int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx); // tsdbRead.c ============================================================================================== int32_t tsdbTakeReadSnap(STsdb *pTsdb, STsdbReadSnap **ppSnap); void tsdbUntakeReadSnap(STsdb *pTsdb, STsdbReadSnap *pSnap); @@ -260,7 +285,7 @@ void tsdbUntakeReadSnap(STsdb *pTsdb, STsdbReadSnap *pSnap); // tsdbCache int32_t tsdbOpenCache(STsdb *pTsdb); -void tsdbCloseCache(SLRUCache *pCache); +void tsdbCloseCache(STsdb *pTsdb); int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb *pTsdb); int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, STSRow *row, bool dup); int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHandle **h); @@ -277,13 +302,6 @@ size_t tsdbCacheGetCapacity(SVnode *pVnode); int32_t tsdbCacheLastArray2Row(SArray *pLastArray, STSRow **ppRow, STSchema *pSchema); // structs ======================= -typedef struct { - int minFid; - int midFid; - int maxFid; - TSKEY minKey; -} SRtn; - struct STsdbFS { SDelFile *pDelFile; SArray *aDFileSet; // SArray @@ -298,6 +316,7 @@ struct STsdb { SMemTable *imem; STsdbFS fs; SLRUCache *lruCache; + TdThreadMutex lruMutex; }; struct TSDBKEY { @@ -311,30 +330,23 @@ struct SMemSkipListNode { SMemSkipListNode *forwards[0]; }; typedef struct SMemSkipList { - uint32_t seed; int64_t size; + uint32_t seed; int8_t maxLevel; int8_t level; SMemSkipListNode *pHead; SMemSkipListNode *pTail; } SMemSkipList; -struct SDelDataInfo { - tb_uid_t suid; - tb_uid_t uid; -}; - struct STbData { tb_uid_t suid; tb_uid_t uid; TSKEY minKey; TSKEY maxKey; - int64_t minVersion; - int64_t maxVersion; - int32_t maxSkmVer; SDelData *pHead; SDelData *pTail; SMemSkipList sl; + STbData *next; }; struct SMemTable { @@ -344,11 +356,13 @@ struct SMemTable { volatile int32_t nRef; TSKEY minKey; TSKEY maxKey; - int64_t minVersion; - int64_t maxVersion; int64_t nRow; int64_t nDel; - SArray *aTbData; // SArray + struct { + int32_t nTbData; + int32_t nBucket; + STbData **aBucket; + }; }; struct TSDBROW { @@ -379,63 +393,51 @@ struct SMapData { uint8_t *pData; }; -typedef struct { +struct SBlockCol { int16_t cid; int8_t type; int8_t smaOn; - int8_t flag; // HAS_NONE|HAS_NULL|HAS_VALUE - int32_t offset; - int32_t szBitmap; // bitmap size - int32_t szOffset; // size of offset, only for variant-length data type - int32_t szValue; // compressed column value size + int8_t flag; // HAS_NONE|HAS_NULL|HAS_VALUE int32_t szOrigin; // original column value size (only save for variant data type) -} SBlockCol; - -typedef struct { - int32_t nRow; - int8_t cmprAlg; - int64_t offset; // block data offset - int32_t szBlockCol; // SBlockCol size - int32_t szVersion; // VERSION size - int32_t szTSKEY; // TSKEY size - int32_t szBlock; // total block size - int64_t sOffset; // sma offset - int32_t nSma; // sma size -} SSubBlock; + int32_t szBitmap; // bitmap size, 0 only for flag == HAS_VAL + int32_t szOffset; // offset size, 0 only for non-variant-length type + int32_t szValue; // value size, 0 when flag == (HAS_NULL | HAS_NONE) + int32_t offset; +}; + +struct SBlockInfo { + int64_t offset; // block data offset + int32_t szBlock; + int32_t szKey; +}; + +struct SSmaInfo { + int64_t offset; + int32_t size; +}; struct SBlock { - TSDBKEY minKey; - TSDBKEY maxKey; - int64_t minVersion; - int64_t maxVersion; - int32_t nRow; - int8_t last; - int8_t hasDup; - int8_t nSubBlock; - SSubBlock aSubBlock[TSDB_MAX_SUBBLOCKS]; + TSDBKEY minKey; + TSDBKEY maxKey; + int64_t minVer; + int64_t maxVer; + int32_t nRow; + int8_t hasDup; + int8_t nSubBlock; + SBlockInfo aSubBlock[TSDB_MAX_SUBBLOCKS]; + SSmaInfo smaInfo; }; struct SBlockL { - struct { - int64_t uid; - int64_t version; - TSKEY ts; - } minKey; - struct { - int64_t uid; - int64_t version; - TSKEY ts; - } maxKey; - int64_t minVer; - int64_t maxVer; - int32_t nRow; - int8_t cmprAlg; - int64_t offset; - int32_t szBlock; - int32_t szBlockCol; - int32_t szUid; - int32_t szVer; - int32_t szTSKEY; + int64_t suid; + int64_t minUid; + int64_t maxUid; + TSKEY minKey; + TSKEY maxKey; + int64_t minVer; + int64_t maxVer; + int32_t nRow; + SBlockInfo bInfo; }; struct SColData { @@ -450,10 +452,17 @@ struct SColData { uint8_t *pData; }; +// (SBlockData){.suid = 0, .uid = 0}: block data not initialized +// (SBlockData){.suid = suid, .uid = uid}: block data for ONE child table int .data file +// (SBlockData){.suid = suid, .uid = 0}: block data for N child tables int .last file +// (SBlockData){.suid = 0, .uid = uid}: block data for 1 normal table int .last/.data file struct SBlockData { - int32_t nRow; - int64_t *aVersion; - TSKEY *aTSKEY; + int64_t suid; // 0 means normal table block data, otherwise child table block data + int64_t uid; // 0 means block data in .last file, otherwise in .data file + int32_t nRow; // number of rows + int64_t *aUid; // uids of each row, only exist in block data in .last file (uid == 0) + int64_t *aVersion; // versions of each row + TSKEY *aTSKEY; // timestamp of each row SArray *aIdx; // SArray SArray *aColData; // SArray }; @@ -491,13 +500,18 @@ struct SDelIdx { int64_t size; }; -#pragma pack(push, 1) -struct SBlockDataHdr { +struct SDiskDataHdr { uint32_t delimiter; + uint32_t fmtVer; int64_t suid; int64_t uid; + int32_t szUid; + int32_t szVer; + int32_t szKey; + int32_t szBlkCol; + int32_t nRow; + int8_t cmprAlg; }; -#pragma pack(pop) struct SDelFile { volatile int32_t nRef; @@ -527,6 +541,7 @@ struct SLastFile { int64_t commitID; int64_t size; + int64_t offset; }; struct SSmaFile { @@ -561,6 +576,8 @@ struct SDelFWriter { STsdb *pTsdb; SDelFile fDel; TdFilePtr pWriteH; + + uint8_t *aBuf[1]; }; struct SDataFWriter { @@ -576,6 +593,8 @@ struct SDataFWriter { SDataFile fData; SLastFile fLast; SSmaFile fSma; + + uint8_t *aBuf[4]; }; struct STsdbReadSnap { diff --git a/source/dnode/vnode/src/inc/vnd.h b/source/dnode/vnode/src/inc/vnd.h index dd1facb462ceb2b543f4a2ea443c9a886a6b13ca..898e79928b8fbb3ceac2272b36e31b47690eee23 100644 --- a/source/dnode/vnode/src/inc/vnd.h +++ b/source/dnode/vnode/src/inc/vnd.h @@ -65,6 +65,7 @@ struct SVBufPool { SVBufPool* next; SVnode* pVnode; volatile int32_t nRef; + TdThreadSpinlock lock; int64_t size; uint8_t* ptr; SVBufPoolNode* pTail; @@ -80,7 +81,7 @@ int32_t vnodeQueryOpen(SVnode* pVnode); void vnodeQueryClose(SVnode* pVnode); int32_t vnodeGetTableMeta(SVnode* pVnode, SRpcMsg* pMsg, bool direct); int vnodeGetTableCfg(SVnode* pVnode, SRpcMsg* pMsg, bool direct); -int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg); +int32_t vnodeGetBatchMeta(SVnode* pVnode, SRpcMsg* pMsg); // vnodeCommit.c int32_t vnodeBegin(SVnode* pVnode); @@ -98,6 +99,8 @@ void vnodeSyncStart(SVnode* pVnode); void vnodeSyncClose(SVnode* pVnode); void vnodeRedirectRpcMsg(SVnode* pVnode, SRpcMsg* pMsg); bool vnodeIsLeader(SVnode* pVnode); +bool vnodeIsReadyForRead(SVnode* pVnode); +bool vnodeIsRoleLeader(SVnode* pVnode); #ifdef __cplusplus } diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 02c4129d6f5b4e6112e3e636fe502988787663b8..9b252df58b2a87c4baf8453ad597d62e50b61a33 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -102,7 +102,7 @@ int metaCommit(SMeta* pMeta); int metaCreateSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq); int metaAlterSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq); int metaDropSTable(SMeta* pMeta, int64_t verison, SVDropStbReq* pReq, SArray* tbUidList); -int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq); +int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq, STableMetaRsp **pMetaRsp); int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq, SArray* tbUids); int metaTtlDropTable(SMeta* pMeta, int64_t ttl, SArray* tbUids); int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp* pMetaRsp); @@ -130,6 +130,14 @@ int metaTtlSmaller(SMeta* pMeta, uint64_t time, SArray* uidList); int32_t metaCreateTSma(SMeta* pMeta, int64_t version, SSmaCfg* pCfg); int32_t metaDropTSma(SMeta* pMeta, int64_t indexUid); +typedef struct SMetaInfo { + int64_t uid; + int64_t suid; + int64_t version; + int32_t skmVer; +} SMetaInfo; +int32_t metaGetInfo(SMeta* pMeta, int64_t uid, SMetaInfo* pInfo); + // tsdb int tsdbOpen(SVnode* pVnode, STsdb** ppTsdb, const char* dir, STsdbKeepCfg* pKeepCfg); int tsdbClose(STsdb** pTsdb); @@ -144,6 +152,7 @@ int32_t tsdbDeleteTableData(STsdb* pTsdb, int64_t version, tb_uid_t suid, tb STsdbReader tsdbQueryCacheLastT(STsdb* tsdb, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId, void* pMemRef); int32_t tsdbSetKeepCfg(STsdb* pTsdb, STsdbCfg* pCfg); +int32_t tsdbGetStbIdList(SMeta* pMeta, int64_t suid, SArray* list); // tq int tqInit(); @@ -154,13 +163,16 @@ int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver); int tqCommit(STQ*); int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd); int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId); -int32_t tqProcessCheckAlterInfoReq(STQ* pTq, char* msg, int32_t msgLen); -int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen); -int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen); -int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ver); +// tq-mq +int32_t tqProcessAddCheckInfoReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen); +int32_t tqProcessDelCheckInfoReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen); +int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen); +int32_t tqProcessVgDeleteReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen); +int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen); int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg); -int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen); -int32_t tqProcessTaskDropReq(STQ* pTq, char* msg, int32_t msgLen); +// tq-stream +int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen); +int32_t tqProcessTaskDropReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen); int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* data, int64_t ver); int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec); @@ -169,10 +181,9 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg); -int32_t tsdbGetStbIdList(SMeta* pMeta, int64_t suid, SArray* list); -SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid, - const char* stbFullName, int32_t vgId, SBatchDeleteReq* pDeleteReq); +SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid, + const char* stbFullName, SBatchDeleteReq* pDeleteReq); // sma int32_t smaInit(); @@ -187,7 +198,6 @@ int32_t smaAsyncPreCommit(SSma* pSma); int32_t smaAsyncCommit(SSma* pSma); int32_t smaAsyncPostCommit(SSma* pSma); int32_t smaDoRetention(SSma* pSma, int64_t now); -int32_t smaProcessFetch(SSma* pSma, void* pMsg); int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg); int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg); @@ -357,6 +367,7 @@ struct SSma { void smaHandleRes(void* pVnode, int64_t smaId, const SArray* data); enum { + SNAP_DATA_CFG = 0, SNAP_DATA_META = 1, SNAP_DATA_TSDB = 2, SNAP_DATA_DEL = 3, diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c new file mode 100644 index 0000000000000000000000000000000000000000..b8cc9f0df2f10d6bd42f6d68f801d2a749479626 --- /dev/null +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -0,0 +1,206 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ +#include "meta.h" + +#define META_CACHE_BASE_BUCKET 1024 + +// (uid , suid) : child table +// (uid, 0) : normal table +// (suid, suid) : super table +typedef struct SMetaCacheEntry SMetaCacheEntry; +struct SMetaCacheEntry { + SMetaCacheEntry* next; + SMetaInfo info; +}; + +struct SMetaCache { + int32_t nEntry; + int32_t nBucket; + SMetaCacheEntry** aBucket; +}; + +int32_t metaCacheOpen(SMeta* pMeta) { + int32_t code = 0; + SMetaCache* pCache = NULL; + + pCache = (SMetaCache*)taosMemoryMalloc(sizeof(SMetaCache)); + if (pCache == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + + pCache->nEntry = 0; + pCache->nBucket = META_CACHE_BASE_BUCKET; + pCache->aBucket = (SMetaCacheEntry**)taosMemoryCalloc(pCache->nBucket, sizeof(SMetaCacheEntry*)); + if (pCache->aBucket == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + taosMemoryFree(pCache); + goto _err; + } + + pMeta->pCache = pCache; + +_exit: + return code; + +_err: + metaError("vgId:%d meta open cache failed since %s", TD_VID(pMeta->pVnode), tstrerror(code)); + return code; +} + +void metaCacheClose(SMeta* pMeta) { + if (pMeta->pCache) { + for (int32_t iBucket = 0; iBucket < pMeta->pCache->nBucket; iBucket++) { + SMetaCacheEntry* pEntry = pMeta->pCache->aBucket[iBucket]; + while (pEntry) { + SMetaCacheEntry* tEntry = pEntry->next; + taosMemoryFree(pEntry); + pEntry = tEntry; + } + } + taosMemoryFree(pMeta->pCache->aBucket); + taosMemoryFree(pMeta->pCache); + pMeta->pCache = NULL; + } +} + +static int32_t metaRehashCache(SMetaCache* pCache, int8_t expand) { + int32_t code = 0; + int32_t nBucket; + + if (expand) { + nBucket = pCache->nBucket * 2; + } else { + nBucket = pCache->nBucket / 2; + } + + SMetaCacheEntry** aBucket = (SMetaCacheEntry**)taosMemoryCalloc(nBucket, sizeof(SMetaCacheEntry*)); + if (aBucket == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + + // rehash + for (int32_t iBucket = 0; iBucket < pCache->nBucket; iBucket++) { + SMetaCacheEntry* pEntry = pCache->aBucket[iBucket]; + + while (pEntry) { + SMetaCacheEntry* pTEntry = pEntry->next; + + pEntry->next = aBucket[TABS(pEntry->info.uid) % nBucket]; + aBucket[TABS(pEntry->info.uid) % nBucket] = pEntry; + + pEntry = pTEntry; + } + } + + // final set + taosMemoryFree(pCache->aBucket); + pCache->nBucket = nBucket; + pCache->aBucket = aBucket; + +_exit: + return code; +} + +int32_t metaCacheUpsert(SMeta* pMeta, SMetaInfo* pInfo) { + int32_t code = 0; + + // ASSERT(metaIsWLocked(pMeta)); + + // search + SMetaCache* pCache = pMeta->pCache; + int32_t iBucket = TABS(pInfo->uid) % pCache->nBucket; + SMetaCacheEntry** ppEntry = &pCache->aBucket[iBucket]; + while (*ppEntry && (*ppEntry)->info.uid != pInfo->uid) { + ppEntry = &(*ppEntry)->next; + } + + if (*ppEntry) { // update + ASSERT(pInfo->suid == (*ppEntry)->info.suid); + if (pInfo->version > (*ppEntry)->info.version) { + (*ppEntry)->info.version = pInfo->version; + (*ppEntry)->info.skmVer = pInfo->skmVer; + } + } else { // insert + if (pCache->nEntry >= pCache->nBucket) { + code = metaRehashCache(pCache, 1); + if (code) goto _exit; + + iBucket = TABS(pInfo->uid) % pCache->nBucket; + } + + SMetaCacheEntry* pEntryNew = (SMetaCacheEntry*)taosMemoryMalloc(sizeof(*pEntryNew)); + if (pEntryNew == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + + pEntryNew->info = *pInfo; + pEntryNew->next = pCache->aBucket[iBucket]; + pCache->aBucket[iBucket] = pEntryNew; + pCache->nEntry++; + } + +_exit: + return code; +} + +int32_t metaCacheDrop(SMeta* pMeta, int64_t uid) { + int32_t code = 0; + + SMetaCache* pCache = pMeta->pCache; + int32_t iBucket = TABS(uid) % pCache->nBucket; + SMetaCacheEntry** ppEntry = &pCache->aBucket[iBucket]; + while (*ppEntry && (*ppEntry)->info.uid != uid) { + ppEntry = &(*ppEntry)->next; + } + + SMetaCacheEntry* pEntry = *ppEntry; + if (pEntry) { + *ppEntry = pEntry->next; + taosMemoryFree(pEntry); + pCache->nEntry--; + if (pCache->nEntry < pCache->nBucket / 4 && pCache->nBucket > META_CACHE_BASE_BUCKET) { + code = metaRehashCache(pCache, 0); + if (code) goto _exit; + } + } else { + code = TSDB_CODE_NOT_FOUND; + } + +_exit: + return code; +} + +int32_t metaCacheGet(SMeta* pMeta, int64_t uid, SMetaInfo* pInfo) { + int32_t code = 0; + + SMetaCache* pCache = pMeta->pCache; + int32_t iBucket = TABS(uid) % pCache->nBucket; + SMetaCacheEntry* pEntry = pCache->aBucket[iBucket]; + + while (pEntry && pEntry->info.uid != uid) { + pEntry = pEntry->next; + } + + if (pEntry) { + *pInfo = pEntry->info; + } else { + code = TSDB_CODE_NOT_FOUND; + } + + return code; +} diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index 941d2c6d724db5b93c4d011988b83484a3e746ad..f8ecd17cb7a95f320bcfa54605742ddaadfe6cc6 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -73,7 +73,7 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) { } // open pUidIdx - ret = tdbTbOpen("uid.idx", sizeof(tb_uid_t), sizeof(int64_t), uidIdxKeyCmpr, pMeta->pEnv, &pMeta->pUidIdx); + ret = tdbTbOpen("uid.idx", sizeof(tb_uid_t), sizeof(SUidIdxVal), uidIdxKeyCmpr, pMeta->pEnv, &pMeta->pUidIdx); if (ret < 0) { metaError("vgId:%d, failed to open meta uid idx since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; @@ -87,7 +87,7 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) { } // open pCtbIdx - ret = tdbTbOpen("ctb.idx", sizeof(SCtbIdxKey), 0, ctbIdxKeyCmpr, pMeta->pEnv, &pMeta->pCtbIdx); + ret = tdbTbOpen("ctb.idx", sizeof(SCtbIdxKey), -1, ctbIdxKeyCmpr, pMeta->pEnv, &pMeta->pCtbIdx); if (ret < 0) { metaError("vgId:%d, failed to open meta child table index since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; @@ -143,6 +143,13 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) { goto _err; } + int32_t code = metaCacheOpen(pMeta); + if (code) { + terrno = code; + metaError("vgId:%d, failed to open meta cache since %s", TD_VID(pVnode), tstrerror(terrno)); + goto _err; + } + metaDebug("vgId:%d, meta is opened", TD_VID(pVnode)); *ppMeta = pMeta; @@ -169,6 +176,7 @@ _err: int metaClose(SMeta *pMeta) { if (pMeta) { + if (pMeta->pCache) metaCacheClose(pMeta); if (pMeta->pIdx) metaCloseIdx(pMeta); if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb); if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx); diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index eed0ae5e14761f25312c339f6fff9dae5059e144..7df355a59b1a1099faf19daf13c73db3cc9ea095 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -53,6 +53,89 @@ _err: return -1; } +// int metaGetTableEntryByUidTest(void* meta, SArray *uidList) { +// +// SArray* readerList = taosArrayInit(taosArrayGetSize(uidList), sizeof(SMetaReader)); +// SArray* uidVersion = taosArrayInit(taosArrayGetSize(uidList), sizeof(STbDbKey)); +// SMeta *pMeta = meta; +// int64_t version; +// SHashObj *uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); +// +// int64_t stt1 = taosGetTimestampUs(); +// for(int i = 0; i < taosArrayGetSize(uidList); i++) { +// void* ppVal = NULL; +// int vlen = 0; +// uint64_t * uid = taosArrayGet(uidList, i); +// // query uid.idx +// if (tdbTbGet(pMeta->pUidIdx, uid, sizeof(*uid), &ppVal, &vlen) < 0) { +// continue; +// } +// version = *(int64_t *)ppVal; +// +// STbDbKey tbDbKey = {.version = version, .uid = *uid}; +// taosArrayPush(uidVersion, &tbDbKey); +// taosHashPut(uHash, uid, sizeof(int64_t), ppVal, sizeof(int64_t)); +// } +// int64_t stt2 = taosGetTimestampUs(); +// qDebug("metaGetTableEntryByUidTest1 rows:%d, cost:%ld us", taosArrayGetSize(uidList), stt2-stt1); +// +// TBC *pCur = NULL; +// tdbTbcOpen(pMeta->pTbDb, &pCur, NULL); +// tdbTbcMoveToFirst(pCur); +// void *pKey = NULL; +// int kLen = 0; +// +// while(1){ +// SMetaReader pReader = {0}; +// int32_t ret = tdbTbcNext(pCur, &pKey, &kLen, &pReader.pBuf, &pReader.szBuf); +// if (ret < 0) break; +// STbDbKey *tmp = (STbDbKey*)pKey; +// int64_t *ver = (int64_t*)taosHashGet(uHash, &tmp->uid, sizeof(int64_t)); +// if(ver == NULL || *ver != tmp->version) continue; +// taosArrayPush(readerList, &pReader); +// } +// tdbTbcClose(pCur); +// +// taosArrayClear(readerList); +// int64_t stt3 = taosGetTimestampUs(); +// qDebug("metaGetTableEntryByUidTest2 rows:%d, cost:%ld us", taosArrayGetSize(uidList), stt3-stt2); +// for(int i = 0; i < taosArrayGetSize(uidVersion); i++) { +// SMetaReader pReader = {0}; +// +// STbDbKey *tbDbKey = taosArrayGet(uidVersion, i); +// // query table.db +// if (tdbTbGet(pMeta->pTbDb, tbDbKey, sizeof(STbDbKey), &pReader.pBuf, &pReader.szBuf) < 0) { +// continue; +// } +// taosArrayPush(readerList, &pReader); +// } +// int64_t stt4 = taosGetTimestampUs(); +// qDebug("metaGetTableEntryByUidTest3 rows:%d, cost:%ld us", taosArrayGetSize(uidList), stt4-stt3); +// +// for(int i = 0; i < taosArrayGetSize(readerList); i++){ +// SMetaReader* pReader = taosArrayGet(readerList, i); +// metaReaderInit(pReader, meta, 0); +// // decode the entry +// tDecoderInit(&pReader->coder, pReader->pBuf, pReader->szBuf); +// +// if (metaDecodeEntry(&pReader->coder, &pReader->me) < 0) { +// } +// metaReaderClear(pReader); +// } +// int64_t stt5 = taosGetTimestampUs(); +// qDebug("metaGetTableEntryByUidTest4 rows:%d, cost:%ld us", taosArrayGetSize(readerList), stt5-stt4); +// return 0; +// } + +bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid) { + // query uid.idx + if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), NULL, NULL) < 0) { + return false; + } + + return true; +} + int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) { SMeta *pMeta = pReader->pMeta; int64_t version; @@ -63,7 +146,7 @@ int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) { return -1; } - version = *(int64_t *)pReader->pBuf; + version = ((SUidIdxVal *)pReader->pBuf)[0].version; return metaGetTableEntryByVersion(pReader, version, uid); } @@ -160,7 +243,7 @@ int metaTbCursorNext(SMTbCursor *pTbCur) { tDecoderClear(&pTbCur->mr.coder); - metaGetTableEntryByVersion(&pTbCur->mr, *(int64_t *)pTbCur->pVal, *(tb_uid_t *)pTbCur->pKey); + metaGetTableEntryByVersion(&pTbCur->mr, ((SUidIdxVal *)pTbCur->pVal)[0].version, *(tb_uid_t *)pTbCur->pKey); if (pTbCur->mr.me.type == TSDB_SUPER_TABLE) { continue; } @@ -185,7 +268,7 @@ _query: goto _err; } - version = *(int64_t *)pData; + version = ((SUidIdxVal *)pData)[0].version; tdbTbGet(pMeta->pTbDb, &(STbDbKey){.uid = uid, .version = version}, sizeof(STbDbKey), &pData, &nData); SMetaEntry me = {0}; @@ -429,18 +512,65 @@ STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver) { } int32_t metaGetTbTSchemaEx(SMeta *pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sver, STSchema **ppTSchema) { - int32_t code = 0; - STSchema *pTSchema = NULL; - SSkmDbKey skmDbKey = {.uid = suid ? suid : uid, .sver = sver}; + int32_t code = 0; + void *pData = NULL; int nData = 0; + SSkmDbKey skmDbKey; + if (sver <= 0) { + SMetaInfo info; + if (metaGetInfo(pMeta, suid ? suid : uid, &info) == 0) { + sver = info.skmVer; + } else { + TBC *pSkmDbC = NULL; + int c; + + skmDbKey.uid = suid ? suid : uid; + skmDbKey.sver = INT32_MAX; + + tdbTbcOpen(pMeta->pSkmDb, &pSkmDbC, NULL); + metaRLock(pMeta); + + if (tdbTbcMoveTo(pSkmDbC, &skmDbKey, sizeof(skmDbKey), &c) < 0) { + metaULock(pMeta); + tdbTbcClose(pSkmDbC); + code = TSDB_CODE_NOT_FOUND; + goto _exit; + } + + ASSERT(c); + + if (c < 0) { + tdbTbcMoveToPrev(pSkmDbC); + } + + const void *pKey = NULL; + int32_t nKey = 0; + tdbTbcGet(pSkmDbC, &pKey, &nKey, NULL, NULL); + + if (((SSkmDbKey *)pKey)->uid != skmDbKey.uid) { + metaULock(pMeta); + tdbTbcClose(pSkmDbC); + code = TSDB_CODE_NOT_FOUND; + goto _exit; + } + + sver = ((SSkmDbKey *)pKey)->sver; + + metaULock(pMeta); + tdbTbcClose(pSkmDbC); + } + } + + ASSERT(sver > 0); - // query + skmDbKey.uid = suid ? suid : uid; + skmDbKey.sver = sver; metaRLock(pMeta); - if (tdbTbGet(pMeta->pSkmDb, &skmDbKey, sizeof(skmDbKey), &pData, &nData) < 0) { - code = TSDB_CODE_NOT_FOUND; + if (tdbTbGet(pMeta->pSkmDb, &skmDbKey, sizeof(SSkmDbKey), &pData, &nData) < 0) { metaULock(pMeta); - goto _err; + code = TSDB_CODE_NOT_FOUND; + goto _exit; } metaULock(pMeta); @@ -462,15 +592,13 @@ int32_t metaGetTbTSchemaEx(SMeta *pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sv SSchema *pSchema = pSchemaWrapper->pSchema + i; tdAddColToSchema(&sb, pSchema->type, pSchema->flags, pSchema->colId, pSchema->bytes); } - pTSchema = tdGetSchemaFromBuilder(&sb); + STSchema *pTSchema = tdGetSchemaFromBuilder(&sb); tdDestroyTSchemaBuilder(&sb); *ppTSchema = pTSchema; taosMemoryFree(pSchemaWrapper->pSchema); - return code; -_err: - *ppTSchema = NULL; +_exit: return code; } @@ -487,11 +615,15 @@ int64_t metaGetTbNum(SMeta *pMeta) { // N.B. Called by statusReq per second int64_t metaGetTimeSeriesNum(SMeta *pMeta) { // sum of (number of columns of stable - 1) * number of ctables (excluding timestamp column) - int64_t num = 0; - vnodeGetTimeSeriesNum(pMeta->pVnode, &num); - pMeta->pVnode->config.vndStats.numOfTimeSeries = num; + if (pMeta->pVnode->config.vndStats.numOfTimeSeries <= 0 || ++pMeta->pVnode->config.vndStats.itvTimeSeries % 60 == 0) { + int64_t num = 0; + vnodeGetTimeSeriesNum(pMeta->pVnode, &num); + pMeta->pVnode->config.vndStats.numOfTimeSeries = num; + + pMeta->pVnode->config.vndStats.itvTimeSeries = 0; + } - return pMeta->pVnode->config.vndStats.numOfTimeSeries; + return pMeta->pVnode->config.vndStats.numOfTimeSeries + pMeta->pVnode->config.vndStats.numOfNTimeSeries; } typedef struct { @@ -749,9 +881,8 @@ SArray *metaGetSmaTbUids(SMeta *pMeta) { #endif -const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *val) { - ASSERT(pEntry->type == TSDB_CHILD_TABLE); - STag *tag = (STag *)pEntry->ctbEntry.pTags; +const void *metaGetTableTagVal(void *pTag, int16_t type, STagVal *val) { + STag *tag = (STag *)pTag; if (type == TSDB_DATA_TYPE_JSON) { return tag; } @@ -760,6 +891,37 @@ const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *val) { if (!find) { return NULL; } + +#ifdef TAG_FILTER_DEBUG + if (IS_VAR_DATA_TYPE(val->type)) { + char *buf = taosMemoryCalloc(val->nData + 1, 1); + memcpy(buf, val->pData, val->nData); + metaDebug("metaTag table val varchar index:%d cid:%d type:%d value:%s", 1, val->cid, val->type, buf); + taosMemoryFree(buf); + } else { + double dval = 0; + GET_TYPED_DATA(dval, double, val->type, &val->i64); + metaDebug("metaTag table val number index:%d cid:%d type:%d value:%f", 1, val->cid, val->type, dval); + } + + SArray *pTagVals = NULL; + tTagToValArray((STag *)pTag, &pTagVals); + for (int i = 0; i < taosArrayGetSize(pTagVals); i++) { + STagVal *pTagVal = (STagVal *)taosArrayGet(pTagVals, i); + + if (IS_VAR_DATA_TYPE(pTagVal->type)) { + char *buf = taosMemoryCalloc(pTagVal->nData + 1, 1); + memcpy(buf, pTagVal->pData, pTagVal->nData); + metaDebug("metaTag table varchar index:%d cid:%d type:%d value:%s", i, pTagVal->cid, pTagVal->type, buf); + taosMemoryFree(buf); + } else { + double dval = 0; + GET_TYPED_DATA(dval, double, pTagVal->type, &pTagVal->i64); + metaDebug("metaTag table number index:%d cid:%d type:%d value:%f", i, pTagVal->cid, pTagVal->type, dval); + } + } +#endif + return val; } @@ -853,6 +1015,9 @@ int32_t metaFilterTableIds(SMeta *pMeta, SMetaFltParam *param, SArray *pUids) { break; } } + if (p->suid != pKey->suid) { + break; + } first = false; if (p != NULL) { int32_t cmp = (*param->filterFunc)(p->data, pKey->data, pKey->type); @@ -888,3 +1053,75 @@ END: return ret; } + +int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags) { + SMCtbCursor *pCur = metaOpenCtbCursor(pMeta, suid); + + SHashObj *uHash = NULL; + size_t len = taosArrayGetSize(uidList); // len > 0 means there already have uids + if (len > 0) { + uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + for (int i = 0; i < len; i++) { + int64_t *uid = taosArrayGet(uidList, i); + taosHashPut(uHash, uid, sizeof(int64_t), &i, sizeof(i)); + } + } + while (1) { + tb_uid_t id = metaCtbCursorNext(pCur); + if (id == 0) { + break; + } + + if (len > 0 && taosHashGet(uHash, &id, sizeof(int64_t)) == NULL) { + continue; + } else if (len == 0) { + taosArrayPush(uidList, &id); + } + + taosHashPut(tags, &id, sizeof(int64_t), pCur->pVal, pCur->vLen); + } + + taosHashCleanup(uHash); + metaCloseCtbCursor(pCur); + return TSDB_CODE_SUCCESS; +} + +int32_t metaCacheGet(SMeta *pMeta, int64_t uid, SMetaInfo *pInfo); + +int32_t metaGetInfo(SMeta *pMeta, int64_t uid, SMetaInfo *pInfo) { + int32_t code = 0; + void *pData = NULL; + int nData = 0; + + metaRLock(pMeta); + + // search cache + if (metaCacheGet(pMeta, uid, pInfo) == 0) { + metaULock(pMeta); + goto _exit; + } + + // search TDB + if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData) < 0) { + // not found + metaULock(pMeta); + code = TSDB_CODE_NOT_FOUND; + goto _exit; + } + + metaULock(pMeta); + + pInfo->uid = uid; + pInfo->suid = ((SUidIdxVal *)pData)->suid; + pInfo->version = ((SUidIdxVal *)pData)->version; + pInfo->skmVer = ((SUidIdxVal *)pData)->skmVer; + + // upsert the cache + metaWLock(pMeta); + metaCacheUpsert(pMeta, pInfo); + metaULock(pMeta); + +_exit: + tdbFree(pData); + return code; +} diff --git a/source/dnode/vnode/src/meta/metaSma.c b/source/dnode/vnode/src/meta/metaSma.c index 1e5b699fce275f2333fc1b60bcc723ed3507a222..3ada7d1814b241081e52dc5e7ac8e104288ee3ad 100644 --- a/source/dnode/vnode/src/meta/metaSma.c +++ b/source/dnode/vnode/src/meta/metaSma.c @@ -28,9 +28,9 @@ int32_t metaCreateTSma(SMeta *pMeta, int64_t version, SSmaCfg *pCfg) { int vLen = 0; const void *pKey = NULL; const void *pVal = NULL; - void * pBuf = NULL; + void *pBuf = NULL; int32_t szBuf = 0; - void * p = NULL; + void *p = NULL; SMetaReader mr = {0}; // validate req @@ -83,8 +83,8 @@ int32_t metaDropTSma(SMeta *pMeta, int64_t indexUid) { static int metaSaveSmaToDB(SMeta *pMeta, const SMetaEntry *pME) { STbDbKey tbDbKey; - void * pKey = NULL; - void * pVal = NULL; + void *pKey = NULL; + void *pVal = NULL; int kLen = 0; int vLen = 0; SEncoder coder = {0}; @@ -130,7 +130,8 @@ _err: } static int metaUpdateUidIdx(SMeta *pMeta, const SMetaEntry *pME) { - return tdbTbInsert(pMeta->pUidIdx, &pME->uid, sizeof(tb_uid_t), &pME->version, sizeof(int64_t), &pMeta->txn); + SUidIdxVal uidIdxVal = {.suid = pME->smaEntry.tsma->indexUid, .version = pME->version, .skmVer = 0}; + return tdbTbInsert(pMeta->pUidIdx, &pME->uid, sizeof(tb_uid_t), &uidIdxVal, sizeof(uidIdxVal), &pMeta->txn); } static int metaUpdateNameIdx(SMeta *pMeta, const SMetaEntry *pME) { diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c index 973c3814074685128395bd50243bba8981af4200..0edbd092e6b06883cc1e2b6be66e0ea55b8563a1 100644 --- a/source/dnode/vnode/src/meta/metaSnapshot.c +++ b/source/dnode/vnode/src/meta/metaSnapshot.c @@ -195,3 +195,434 @@ _err: metaError("vgId:%d, vnode snapshot meta write failed since %s", TD_VID(pMeta->pVnode), tstrerror(code)); return code; } + +typedef struct STableInfoForChildTable{ + char *tableName; + SSchemaWrapper *schemaRow; + SSchemaWrapper *tagRow; +}STableInfoForChildTable; + +static void destroySTableInfoForChildTable(void* data) { + STableInfoForChildTable* pData = (STableInfoForChildTable*)data; + taosMemoryFree(pData->tableName); + tDeleteSSchemaWrapper(pData->schemaRow); + tDeleteSSchemaWrapper(pData->tagRow); +} + +static void MoveToSnapShotVersion(SSnapContext* ctx){ + tdbTbcClose(ctx->pCur); + tdbTbcOpen(ctx->pMeta->pTbDb, &ctx->pCur, NULL); + STbDbKey key = {.version = ctx->snapVersion, .uid = INT64_MAX}; + int c = 0; + tdbTbcMoveTo(ctx->pCur, &key, sizeof(key), &c); + if(c < 0){ + tdbTbcMoveToPrev(ctx->pCur); + } +} + +static int32_t MoveToPosition(SSnapContext* ctx, int64_t ver, int64_t uid){ + tdbTbcClose(ctx->pCur); + tdbTbcOpen(ctx->pMeta->pTbDb, &ctx->pCur, NULL); + STbDbKey key = {.version = ver, .uid = uid}; + int c = 0; + tdbTbcMoveTo(ctx->pCur, &key, sizeof(key), &c); + return c; +} + +static void MoveToFirst(SSnapContext* ctx){ + tdbTbcClose(ctx->pCur); + tdbTbcOpen(ctx->pMeta->pTbDb, &ctx->pCur, NULL); + tdbTbcMoveToFirst(ctx->pCur); +} + +static void saveSuperTableInfoForChildTable(SMetaEntry *me, SHashObj *suidInfo){ + STableInfoForChildTable* data = (STableInfoForChildTable*)taosHashGet(suidInfo, &me->uid, sizeof(tb_uid_t)); + if(data){ + return; + } + STableInfoForChildTable dataTmp = {0}; + dataTmp.tableName = strdup(me->name); + + dataTmp.schemaRow = tCloneSSchemaWrapper(&me->stbEntry.schemaRow); + dataTmp.tagRow = tCloneSSchemaWrapper(&me->stbEntry.schemaTag); + taosHashPut(suidInfo, &me->uid, sizeof(tb_uid_t), &dataTmp, sizeof(STableInfoForChildTable)); +} + +int32_t buildSnapContext(SMeta* pMeta, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta, SSnapContext** ctxRet){ + SSnapContext* ctx = taosMemoryCalloc(1, sizeof(SSnapContext)); + if(ctx == NULL) return -1; + *ctxRet = ctx; + ctx->pMeta = pMeta; + ctx->snapVersion = snapVersion; + ctx->suid = suid; + ctx->subType = subType; + ctx->queryMetaOrData = withMeta; + ctx->withMeta = withMeta; + ctx->idVersion = taosHashInit(100, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + if(ctx->idVersion == NULL){ + return -1; + } + + ctx->suidInfo = taosHashInit(100, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + if(ctx->suidInfo == NULL){ + return -1; + } + taosHashSetFreeFp(ctx->suidInfo, destroySTableInfoForChildTable); + + ctx->index = 0; + ctx->idList = taosArrayInit(100, sizeof(int64_t)); + void *pKey = NULL; + void *pVal = NULL; + int vLen = 0, kLen = 0; + + metaDebug("tmqsnap init snapVersion:%" PRIi64, ctx->snapVersion); + MoveToFirst(ctx); + while(1){ + int32_t ret = tdbTbcNext(ctx->pCur, &pKey, &kLen, &pVal, &vLen); + if (ret < 0) break; + STbDbKey *tmp = (STbDbKey*)pKey; + if (tmp->version > ctx->snapVersion) break; + + SIdInfo* idData = (SIdInfo*)taosHashGet(ctx->idVersion, &tmp->uid, sizeof(tb_uid_t)); + if(idData) { + continue; + } + + if (tdbTbGet(pMeta->pUidIdx, &tmp->uid, sizeof(tb_uid_t), NULL, NULL) < 0) { // check if table exist for now, need optimize later + continue; + } + + SDecoder dc = {0}; + SMetaEntry me = {0}; + tDecoderInit(&dc, pVal, vLen); + metaDecodeEntry(&dc, &me); + if(ctx->subType == TOPIC_SUB_TYPE__TABLE){ + if ((me.uid != ctx->suid && me.type == TSDB_SUPER_TABLE) || + (me.ctbEntry.suid != ctx->suid && me.type == TSDB_CHILD_TABLE)){ + tDecoderClear(&dc); + continue; + } + } + + taosArrayPush(ctx->idList, &tmp->uid); + metaDebug("tmqsnap init idlist name:%s, uid:%" PRIi64, me.name, tmp->uid); + SIdInfo info = {0}; + taosHashPut(ctx->idVersion, &tmp->uid, sizeof(tb_uid_t), &info, sizeof(SIdInfo)); + + tDecoderClear(&dc); + } + taosHashClear(ctx->idVersion); + + MoveToSnapShotVersion(ctx); + while(1){ + int32_t ret = tdbTbcPrev(ctx->pCur, &pKey, &kLen, &pVal, &vLen); + if (ret < 0) break; + + STbDbKey *tmp = (STbDbKey*)pKey; + SIdInfo* idData = (SIdInfo*)taosHashGet(ctx->idVersion, &tmp->uid, sizeof(tb_uid_t)); + if(idData){ + continue; + } + SIdInfo info = {.version = tmp->version, .index = 0}; + taosHashPut(ctx->idVersion, &tmp->uid, sizeof(tb_uid_t), &info, sizeof(SIdInfo)); + + SDecoder dc = {0}; + SMetaEntry me = {0}; + tDecoderInit(&dc, pVal, vLen); + metaDecodeEntry(&dc, &me); + if(ctx->subType == TOPIC_SUB_TYPE__TABLE){ + if ((me.uid != ctx->suid && me.type == TSDB_SUPER_TABLE) || + (me.ctbEntry.suid != ctx->suid && me.type == TSDB_CHILD_TABLE)){ + tDecoderClear(&dc); + continue; + } + } + + if ((ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_SUPER_TABLE) + || (ctx->subType == TOPIC_SUB_TYPE__TABLE && me.uid == ctx->suid)) { + saveSuperTableInfoForChildTable(&me, ctx->suidInfo); + } + tDecoderClear(&dc); + } + + for(int i = 0; i < taosArrayGetSize(ctx->idList); i++){ + int64_t *uid = taosArrayGet(ctx->idList, i); + SIdInfo* idData = (SIdInfo*)taosHashGet(ctx->idVersion, uid, sizeof(int64_t)); + ASSERT(idData); + idData->index = i; + metaDebug("tmqsnap init idVersion uid:%" PRIi64 " version:%" PRIi64 " index:%d", *uid, idData->version, idData->index); + } + + return TDB_CODE_SUCCESS; +} + +int32_t destroySnapContext(SSnapContext* ctx){ + tdbTbcClose(ctx->pCur); + taosArrayDestroy(ctx->idList); + taosHashCleanup(ctx->idVersion); + taosHashCleanup(ctx->suidInfo); + taosMemoryFree(ctx); + return 0; +} + +static int32_t buildNormalChildTableInfo(SVCreateTbReq *req, void **pBuf, int32_t *contLen){ + int32_t ret = 0; + SVCreateTbBatchReq reqs = {0}; + + reqs.pArray = taosArrayInit(1, sizeof(struct SVCreateTbReq)); + if (NULL == reqs.pArray){ + ret = -1; + goto end; + } + taosArrayPush(reqs.pArray, req); + reqs.nReqs = 1; + + tEncodeSize(tEncodeSVCreateTbBatchReq, &reqs, *contLen, ret); + if(ret < 0){ + ret = -1; + goto end; + } + *contLen += sizeof(SMsgHead); + *pBuf = taosMemoryMalloc(*contLen); + if (NULL == *pBuf) { + ret = -1; + goto end; + } + SEncoder coder = {0}; + tEncoderInit(&coder, POINTER_SHIFT(*pBuf, sizeof(SMsgHead)), *contLen); + if (tEncodeSVCreateTbBatchReq(&coder, &reqs) < 0) { + taosMemoryFreeClear(*pBuf); + tEncoderClear(&coder); + ret = -1; + goto end; + } + tEncoderClear(&coder); + +end: + taosArrayDestroy(reqs.pArray); + return ret; +} + +static int32_t buildSuperTableInfo(SVCreateStbReq *req, void **pBuf, int32_t *contLen){ + int32_t ret = 0; + tEncodeSize(tEncodeSVCreateStbReq, req, *contLen, ret); + if (ret < 0) { + return -1; + } + + *contLen += sizeof(SMsgHead); + *pBuf = taosMemoryMalloc(*contLen); + if (NULL == *pBuf) { + return -1; + } + + SEncoder encoder = {0}; + tEncoderInit(&encoder, POINTER_SHIFT(*pBuf, sizeof(SMsgHead)), *contLen); + if (tEncodeSVCreateStbReq(&encoder, req) < 0) { + taosMemoryFreeClear(*pBuf); + tEncoderClear(&encoder); + return -1; + } + tEncoderClear(&encoder); + return 0; +} + +int32_t setForSnapShot(SSnapContext* ctx, int64_t uid){ + int c = 0; + + if(uid == 0){ + ctx->index = 0; + return c; + } + + SIdInfo* idInfo = (SIdInfo*)taosHashGet(ctx->idVersion, &uid, sizeof(tb_uid_t)); + if(!idInfo){ + return -1; + } + + ctx->index = idInfo->index; + + return c; +} + +int32_t getMetafromSnapShot(SSnapContext* ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid){ + int32_t ret = 0; + void *pKey = NULL; + void *pVal = NULL; + int vLen = 0, kLen = 0; + + while(1){ + if(ctx->index >= taosArrayGetSize(ctx->idList)){ + metaDebug("tmqsnap get meta end"); + ctx->index = 0; + ctx->queryMetaOrData = false; // change to get data + return 0; + } + + int64_t* uidTmp = taosArrayGet(ctx->idList, ctx->index); + ctx->index++; + SIdInfo* idInfo = (SIdInfo*)taosHashGet(ctx->idVersion, uidTmp, sizeof(tb_uid_t)); + ASSERT(idInfo); + + *uid = *uidTmp; + ret = MoveToPosition(ctx, idInfo->version, *uidTmp); + if(ret == 0){ + break; + } + metaDebug("tmqsnap get meta not exist uid:%" PRIi64 " version:%" PRIi64, *uid, idInfo->version); + } + + tdbTbcGet(ctx->pCur, (const void**)&pKey, &kLen, (const void**)&pVal, &vLen); + SDecoder dc = {0}; + SMetaEntry me = {0}; + tDecoderInit(&dc, pVal, vLen); + metaDecodeEntry(&dc, &me); + metaDebug("tmqsnap get meta uid:%" PRIi64 " name:%s index:%d", *uid, me.name, ctx->index-1); + + if ((ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_SUPER_TABLE) + || (ctx->subType == TOPIC_SUB_TYPE__TABLE && me.uid == ctx->suid)) { + SVCreateStbReq req = {0}; + req.name = me.name; + req.suid = me.uid; + req.schemaRow = me.stbEntry.schemaRow; + req.schemaTag = me.stbEntry.schemaTag; + req.schemaRow.version = 1; + req.schemaTag.version = 1; + + ret = buildSuperTableInfo(&req, pBuf, contLen); + *type = TDMT_VND_CREATE_STB; + + } else if ((ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_CHILD_TABLE) + || (ctx->subType == TOPIC_SUB_TYPE__TABLE && me.type == TSDB_CHILD_TABLE && me.ctbEntry.suid == ctx->suid)) { + STableInfoForChildTable* data = (STableInfoForChildTable*)taosHashGet(ctx->suidInfo, &me.ctbEntry.suid, sizeof(tb_uid_t)); + ASSERT(data); + SVCreateTbReq req = {0}; + + req.type = TSDB_CHILD_TABLE; + req.name = me.name; + req.uid = me.uid; + req.commentLen = -1; + req.ctb.suid = me.ctbEntry.suid; + req.ctb.tagNum = data->tagRow->nCols; + req.ctb.name = data->tableName; + + SArray* tagName = taosArrayInit(req.ctb.tagNum, TSDB_COL_NAME_LEN); + STag* p = (STag*)me.ctbEntry.pTags; + if(tTagIsJson(p)){ + if (p->nTag != 0) { + SSchema* schema = &data->tagRow->pSchema[0]; + taosArrayPush(tagName, schema->name); + } + }else{ + SArray* pTagVals = NULL; + if (tTagToValArray((const STag*)p, &pTagVals) != 0) { + ASSERT(0); + } + int16_t nCols = taosArrayGetSize(pTagVals); + for (int j = 0; j < nCols; ++j) { + STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, j); + for(int i = 0; i < data->tagRow->nCols; i++){ + SSchema *schema = &data->tagRow->pSchema[i]; + if(schema->colId == pTagVal->cid){ + taosArrayPush(tagName, schema->name); + } + } + } + } +// SIdInfo* sidInfo = (SIdInfo*)taosHashGet(ctx->idVersion, &me.ctbEntry.suid, sizeof(tb_uid_t)); +// if(sidInfo->version >= idInfo->version){ +// // need parse tag +// STag* p = (STag*)me.ctbEntry.pTags; +// SArray* pTagVals = NULL; +// if (tTagToValArray((const STag*)p, &pTagVals) != 0) { +// } +// +// int16_t nCols = taosArrayGetSize(pTagVals); +// for (int j = 0; j < nCols; ++j) { +// STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, j); +// } +// }else{ + req.ctb.pTag = me.ctbEntry.pTags; +// } + + req.ctb.tagName = tagName; + ret = buildNormalChildTableInfo(&req, pBuf, contLen); + *type = TDMT_VND_CREATE_TABLE; + taosArrayDestroy(tagName); + } else if(ctx->subType == TOPIC_SUB_TYPE__DB){ + SVCreateTbReq req = {0}; + req.type = TSDB_NORMAL_TABLE; + req.name = me.name; + req.uid = me.uid; + req.commentLen = -1; + req.ntb.schemaRow = me.ntbEntry.schemaRow; + ret = buildNormalChildTableInfo(&req, pBuf, contLen); + *type = TDMT_VND_CREATE_TABLE; + } else{ + ASSERT(0); + } + tDecoderClear(&dc); + + return ret; +} + +SMetaTableInfo getUidfromSnapShot(SSnapContext* ctx){ + SMetaTableInfo result = {0}; + void *pKey = NULL; + void *pVal = NULL; + int vLen, kLen; + + while(1){ + if(ctx->index >= taosArrayGetSize(ctx->idList)){ + metaDebug("tmqsnap get uid info end"); + return result; + } + int64_t* uidTmp = taosArrayGet(ctx->idList, ctx->index); + ctx->index++; + SIdInfo* idInfo = (SIdInfo*)taosHashGet(ctx->idVersion, uidTmp, sizeof(tb_uid_t)); + ASSERT(idInfo); + + int32_t ret = MoveToPosition(ctx, idInfo->version, *uidTmp); + if(ret != 0) { + metaDebug("tmqsnap getUidfromSnapShot not exist uid:%" PRIi64 " version:%" PRIi64, *uidTmp, idInfo->version); + continue; + } + tdbTbcGet(ctx->pCur, (const void**)&pKey, &kLen, (const void**)&pVal, &vLen); + SDecoder dc = {0}; + SMetaEntry me = {0}; + tDecoderInit(&dc, pVal, vLen); + metaDecodeEntry(&dc, &me); + metaDebug("tmqsnap get uid info uid:%" PRIi64 " name:%s index:%d", me.uid, me.name, ctx->index-1); + + if (ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_CHILD_TABLE){ + STableInfoForChildTable* data = (STableInfoForChildTable*)taosHashGet(ctx->suidInfo, &me.ctbEntry.suid, sizeof(tb_uid_t)); + result.uid = me.uid; + result.suid = me.ctbEntry.suid; + result.schema = tCloneSSchemaWrapper(data->schemaRow); + strcpy(result.tbName, me.name); + tDecoderClear(&dc); + break; + } else if (ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_NORMAL_TABLE) { + result.uid = me.uid; + result.suid = 0; + strcpy(result.tbName, me.name); + result.schema = tCloneSSchemaWrapper(&me.ntbEntry.schemaRow); + tDecoderClear(&dc); + break; + } else if(ctx->subType == TOPIC_SUB_TYPE__TABLE && me.type == TSDB_CHILD_TABLE && me.ctbEntry.suid == ctx->suid) { + STableInfoForChildTable* data = (STableInfoForChildTable*)taosHashGet(ctx->suidInfo, &me.ctbEntry.suid, sizeof(tb_uid_t)); + result.uid = me.uid; + result.suid = me.ctbEntry.suid; + strcpy(result.tbName, me.name); + result.schema = tCloneSSchemaWrapper(data->schemaRow); + tDecoderClear(&dc); + break; + } else{ + metaDebug("tmqsnap get uid continue"); + tDecoderClear(&dc); + continue; + } + } + + return result; +} diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 3e7fd9df2b299a66a49b98a16aff1e661113532c..583a2e098f8a54ac61f21d696c6e65c62cd5c4ab 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -27,6 +27,23 @@ static int metaUpdateSuidIdx(SMeta *pMeta, const SMetaEntry *pME); static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry); static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type); +static void metaGetEntryInfo(const SMetaEntry *pEntry, SMetaInfo *pInfo) { + pInfo->uid = pEntry->uid; + pInfo->version = pEntry->version; + if (pEntry->type == TSDB_SUPER_TABLE) { + pInfo->suid = pEntry->uid; + pInfo->skmVer = pEntry->stbEntry.schemaRow.version; + } else if (pEntry->type == TSDB_CHILD_TABLE) { + pInfo->suid = pEntry->ctbEntry.suid; + pInfo->skmVer = 0; + } else if (pEntry->type == TSDB_NORMAL_TABLE) { + pInfo->suid = 0; + pInfo->skmVer = pEntry->ntbEntry.schemaRow.version; + } else { + ASSERT(0); + } +} + static int metaUpdateMetaRsp(tb_uid_t uid, char *tbName, SSchemaWrapper *pSchema, STableMetaRsp *pMetaRsp) { pMetaRsp->pSchemas = taosMemoryMalloc(pSchema->nCols * sizeof(SSchema)); if (NULL == pMetaRsp->pSchemas) { @@ -82,6 +99,7 @@ static int metaSaveJsonVarToIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, const memcpy(val, (uint16_t *)&len, VARSTR_HEADER_SIZE); type = TSDB_DATA_TYPE_VARCHAR; term = indexTermCreate(suid, ADD_VALUE, type, key, nKey, val, len); + taosMemoryFree(val); } else if (pTagVal->nData == 0) { term = indexTermCreate(suid, ADD_VALUE, TSDB_DATA_TYPE_VARCHAR, key, nKey, pTagVal->pData, 0); } @@ -98,6 +116,7 @@ static int metaSaveJsonVarToIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, const indexMultiTermAdd(terms, term); } } + taosArrayDestroy(pTagVals); indexJsonPut(pMeta->pTagIvtIdx, terms, tuid); indexMultiTermDestroy(terms); #endif @@ -171,22 +190,22 @@ int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { void *pBuf = NULL; int32_t szBuf = 0; void *p = NULL; - SMetaReader mr = {0}; // validate req - metaReaderInit(&mr, pMeta, 0); - if (metaGetTableEntryByName(&mr, pReq->name) == 0) { -// TODO: just for pass case -#if 0 - terrno = TSDB_CODE_TDB_STB_ALREADY_EXIST; - metaReaderClear(&mr); - return -1; -#else - metaReaderClear(&mr); - return 0; -#endif + void *pData = NULL; + int nData = 0; + if (tdbTbGet(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pData, &nData) == 0) { + tb_uid_t uid = *(tb_uid_t *)pData; + tdbFree(pData); + SMetaInfo info; + metaGetInfo(pMeta, uid, &info); + if (info.uid == info.suid) { + return 0; + } else { + terrno = TSDB_CODE_TDB_TABLE_ALREADY_EXIST; + return -1; + } } - metaReaderClear(&mr); // set structs me.version = version; @@ -265,8 +284,8 @@ int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq, SArray *tb // drop super table _drop_super_table: tdbTbGet(pMeta->pUidIdx, &pReq->suid, sizeof(tb_uid_t), &pData, &nData); - tdbTbDelete(pMeta->pTbDb, &(STbDbKey){.version = *(int64_t *)pData, .uid = pReq->suid}, sizeof(STbDbKey), - &pMeta->txn); + tdbTbDelete(pMeta->pTbDb, &(STbDbKey){.version = ((SUidIdxVal *)pData)[0].version, .uid = pReq->suid}, + sizeof(STbDbKey), &pMeta->txn); tdbTbDelete(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pMeta->txn); tdbTbDelete(pMeta->pUidIdx, &pReq->suid, sizeof(tb_uid_t), &pMeta->txn); tdbTbDelete(pMeta->pSuidIdx, &pReq->suid, sizeof(tb_uid_t), &pMeta->txn); @@ -298,18 +317,18 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { tdbTbcClose(pUidIdxc); terrno = TSDB_CODE_TDB_STB_NOT_EXIST; - // ASSERT(0); return -1; } ret = tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData); if (ret < 0) { + tdbTbcClose(pUidIdxc); + terrno = TSDB_CODE_TDB_STB_NOT_EXIST; - // ASSERT(0); return -1; } - oversion = *(int64_t *)pData; + oversion = ((SUidIdxVal *)pData)[0].version; tdbTbcOpen(pMeta->pTbDb, &pTbDbc, &pMeta->txn); ret = tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = pReq->suid, .version = oversion}), sizeof(STbDbKey), &c); @@ -336,15 +355,11 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { metaSaveToSkmDb(pMeta, &nStbEntry); } - // if (oStbEntry.stbEntry.schemaTag.sver != pReq->schemaTag.sver) { - // // change tag schema - // } - // update table.db metaSaveToTbDb(pMeta, &nStbEntry); // update uid index - tdbTbcUpsert(pUidIdxc, &pReq->suid, sizeof(tb_uid_t), &version, sizeof(version), 0); + metaUpdateUidIdx(pMeta, &nStbEntry); if (oStbEntry.pBuf) taosMemoryFree(oStbEntry.pBuf); metaULock(pMeta); @@ -354,7 +369,7 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { return 0; } -int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) { +int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq, STableMetaRsp **pMetaRsp) { SMetaEntry me = {0}; SMetaReader mr = {0}; @@ -400,6 +415,25 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) { me.ctbEntry.suid = pReq->ctb.suid; me.ctbEntry.pTags = pReq->ctb.pTag; +#ifdef TAG_FILTER_DEBUG + SArray* pTagVals = NULL; + int32_t code = tTagToValArray((STag*)pReq->ctb.pTag, &pTagVals); + for (int i = 0; i < taosArrayGetSize(pTagVals); i++) { + STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, i); + + if (IS_VAR_DATA_TYPE(pTagVal->type)) { + char* buf = taosMemoryCalloc(pTagVal->nData + 1, 1); + memcpy(buf, pTagVal->pData, pTagVal->nData); + metaDebug("metaTag table:%s varchar index:%d cid:%d type:%d value:%s", pReq->name, i, pTagVal->cid, pTagVal->type, buf); + taosMemoryFree(buf); + } else { + double val = 0; + GET_TYPED_DATA(val, double, pTagVal->type, &pTagVal->i64); + metaDebug("metaTag table:%s number index:%d cid:%d type:%d value:%f", pReq->name, i, pTagVal->cid, pTagVal->type, val); + } + } +#endif + ++pMeta->pVnode->config.vndStats.numOfCTables; } else { me.ntbEntry.ctime = pReq->ctime; @@ -410,10 +444,26 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) { me.ntbEntry.ncid = me.ntbEntry.schemaRow.pSchema[me.ntbEntry.schemaRow.nCols - 1].colId + 1; ++pMeta->pVnode->config.vndStats.numOfNTables; + pMeta->pVnode->config.vndStats.numOfNTimeSeries += me.ntbEntry.schemaRow.nCols - 1; } if (metaHandleEntry(pMeta, &me) < 0) goto _err; + if (pMetaRsp) { + *pMetaRsp = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + + if (*pMetaRsp) { + if (me.type == TSDB_CHILD_TABLE) { + (*pMetaRsp)->tableType = TSDB_CHILD_TABLE; + (*pMetaRsp)->tuid = pReq->uid; + (*pMetaRsp)->suid = pReq->ctb.suid; + strcpy((*pMetaRsp)->tbName, pReq->name); + } else { + metaUpdateMetaRsp(pReq->uid, pReq->name, &pReq->ntb.schemaRow, *pMetaRsp); + } + } + } + metaDebug("vgId:%d, table:%s uid %" PRId64 " is created, type:%" PRId8, TD_VID(pMeta->pVnode), pReq->name, pReq->uid, pReq->type); return 0; @@ -503,7 +553,10 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) { SDecoder dc = {0}; rc = tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData); - int64_t version = *(int64_t *)pData; + if (rc < 0) { + return -1; + } + int64_t version = ((SUidIdxVal *)pData)[0].version; tdbTbGet(pMeta->pTbDb, &(STbDbKey){.version = version, .uid = uid}, sizeof(STbDbKey), &pData, &nData); @@ -517,7 +570,7 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) { int tLen = 0; if (tdbTbGet(pMeta->pUidIdx, &e.ctbEntry.suid, sizeof(tb_uid_t), &tData, &tLen) == 0) { - version = *(int64_t *)tData; + version = ((SUidIdxVal *)tData)[0].version; STbDbKey tbDbKey = {.uid = e.ctbEntry.suid, .version = version}; if (tdbTbGet(pMeta->pTbDb, &tbDbKey, sizeof(tbDbKey), &tData, &tLen) == 0) { SDecoder tdc = {0}; @@ -549,6 +602,7 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) { // drop schema.db (todo) --pMeta->pVnode->config.vndStats.numOfNTables; + pMeta->pVnode->config.vndStats.numOfNTimeSeries -= e.ntbEntry.schemaRow.nCols - 1; } else if (e.type == TSDB_SUPER_TABLE) { tdbTbDelete(pMeta->pSuidIdx, &e.uid, sizeof(tb_uid_t), &pMeta->txn); // drop schema.db (todo) @@ -556,6 +610,8 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) { --pMeta->pVnode->config.vndStats.numOfSTables; } + metaCacheDrop(pMeta, uid); + tDecoderClear(&dc); tdbFree(pData); @@ -594,7 +650,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl ASSERT(c == 0); tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData); - oversion = *(int64_t *)pData; + oversion = ((SUidIdxVal *)pData)[0].version; // search table.db TBC *pTbDbc = NULL; @@ -649,6 +705,8 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].flags = pAlterTbReq->flags; pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].colId = entry.ntbEntry.ncid++; strcpy(pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].name, pAlterTbReq->colName); + + ++pMeta->pVnode->config.vndStats.numOfNTimeSeries; break; case TSDB_ALTER_TABLE_DROP_COLUMN: if (pColumn == NULL) { @@ -669,6 +727,8 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl memmove(pColumn, pColumn + 1, tlen); } pSchema->nCols--; + + --pMeta->pVnode->config.vndStats.numOfNTimeSeries; break; case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: if (pColumn == NULL) { @@ -708,7 +768,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl // save to table db metaSaveToTbDb(pMeta, &entry); - tdbTbcUpsert(pUidIdxc, &entry.uid, sizeof(tb_uid_t), &version, sizeof(version), 0); + metaUpdateUidIdx(pMeta, &entry); metaSaveToSkmDb(pMeta, &entry); @@ -764,7 +824,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA ASSERT(c == 0); tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData); - oversion = *(int64_t *)pData; + oversion = ((SUidIdxVal *)pData)[0].version; // search table.db TBC *pTbDbc = NULL; @@ -784,8 +844,8 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA /* get stbEntry*/ tdbTbGet(pMeta->pUidIdx, &ctbEntry.ctbEntry.suid, sizeof(tb_uid_t), &pVal, &nVal); - tdbTbGet(pMeta->pTbDb, &((STbDbKey){.uid = ctbEntry.ctbEntry.suid, .version = *(int64_t *)pVal}), sizeof(STbDbKey), - (void **)&stbEntry.pBuf, &nVal); + tdbTbGet(pMeta->pTbDb, &((STbDbKey){.uid = ctbEntry.ctbEntry.suid, .version = ((SUidIdxVal *)pVal)[0].version}), + sizeof(STbDbKey), (void **)&stbEntry.pBuf, &nVal); tdbFree(pVal); tDecoderInit(&dc2, stbEntry.pBuf, nVal); metaDecodeEntry(&dc2, &stbEntry); @@ -859,12 +919,16 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA metaSaveToTbDb(pMeta, &ctbEntry); // save to uid.idx - tdbTbUpsert(pMeta->pUidIdx, &ctbEntry.uid, sizeof(tb_uid_t), &version, sizeof(version), &pMeta->txn); + metaUpdateUidIdx(pMeta, &ctbEntry); if (iCol == 0) { metaUpdateTagIdx(pMeta, &ctbEntry); } + SCtbIdxKey ctbIdxKey = {.suid = ctbEntry.ctbEntry.suid, .uid = uid}; + tdbTbUpsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), ctbEntry.ctbEntry.pTags, + ((STag *)(ctbEntry.ctbEntry.pTags))->len, &pMeta->txn); + tDecoderClear(&dc1); tDecoderClear(&dc2); if (ctbEntry.ctbEntry.pTags) taosMemoryFree((void *)ctbEntry.ctbEntry.pTags); @@ -914,7 +978,7 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p ASSERT(c == 0); tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData); - oversion = *(int64_t *)pData; + oversion = ((SUidIdxVal *)pData)[0].version; // search table.db TBC *pTbDbc = NULL; @@ -959,7 +1023,7 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p // save to table db metaSaveToTbDb(pMeta, &entry); - tdbTbcUpsert(pUidIdxc, &entry.uid, sizeof(tb_uid_t), &version, sizeof(version), 0); + metaUpdateUidIdx(pMeta, &entry); metaULock(pMeta); tdbTbcClose(pTbDbc); @@ -1042,7 +1106,14 @@ _err: } static int metaUpdateUidIdx(SMeta *pMeta, const SMetaEntry *pME) { - return tdbTbInsert(pMeta->pUidIdx, &pME->uid, sizeof(tb_uid_t), &pME->version, sizeof(int64_t), &pMeta->txn); + // upsert cache + SMetaInfo info; + metaGetEntryInfo(pME, &info); + metaCacheUpsert(pMeta, &info); + + SUidIdxVal uidIdxVal = {.suid = info.suid, .version = info.version, .skmVer = info.skmVer}; + + return tdbTbUpsert(pMeta->pUidIdx, &pME->uid, sizeof(tb_uid_t), &uidIdxVal, sizeof(uidIdxVal), &pMeta->txn); } static int metaUpdateSuidIdx(SMeta *pMeta, const SMetaEntry *pME) { @@ -1062,7 +1133,9 @@ static int metaUpdateTtlIdx(SMeta *pMeta, const SMetaEntry *pME) { static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME) { SCtbIdxKey ctbIdxKey = {.suid = pME->ctbEntry.suid, .uid = pME->uid}; - return tdbTbInsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), NULL, 0, &pMeta->txn); + + return tdbTbInsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), pME->ctbEntry.pTags, + ((STag *)(pME->ctbEntry.pTags))->len, &pMeta->txn); } int metaCreateTagIdxKey(tb_uid_t suid, int32_t cid, const void *pTagData, int32_t nTagData, int8_t type, tb_uid_t uid, @@ -1118,7 +1191,7 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { return -1; } tbDbKey.uid = pCtbEntry->ctbEntry.suid; - tbDbKey.version = *(int64_t *)pData; + tbDbKey.version = ((SUidIdxVal *)pData)[0].version; tdbTbGet(pMeta->pTbDb, &tbDbKey, sizeof(tbDbKey), &pData, &nData); tDecoderInit(&dc, pData, nData); diff --git a/source/dnode/vnode/src/sma/smaCommit.c b/source/dnode/vnode/src/sma/smaCommit.c index 373cfdfb47a47ceaba6be6ca0e5e9531f61e2441..3cf50a035a720f7bf9e106c69a8a88e1117a8954 100644 --- a/source/dnode/vnode/src/sma/smaCommit.c +++ b/source/dnode/vnode/src/sma/smaCommit.c @@ -83,8 +83,7 @@ int32_t smaBegin(SSma *pSma) { return TSDB_CODE_SUCCESS; } - SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv); - SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat); + SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pSmaEnv); int8_t rsmaTriggerStat = atomic_val_compare_exchange_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED, TASK_TRIGGER_STAT_ACTIVE); @@ -110,7 +109,7 @@ int32_t smaBegin(SSma *pSma) { /** * @brief pre-commit for rollup sma(sync commit). * 1) set trigger stat of rsma timer TASK_TRIGGER_STAT_PAUSED. - * 2) wait all triggered fetch tasks finished + * 2) wait for all triggered fetch tasks to finish * 3) perform persist task for qTaskInfo * * @param pSma @@ -123,19 +122,19 @@ static int32_t tdProcessRSmaSyncPreCommitImpl(SSma *pSma) { } SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv); - SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat); + SRSmaStat *pRSmaStat = SMA_STAT_RSMA(pStat); // step 1: set rsma stat paused atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED); - // step 2: wait all triggered fetch tasks finished + // step 2: wait for all triggered fetch tasks to finish int32_t nLoops = 0; while (1) { if (T_REF_VAL_GET(pStat) == 0) { - smaDebug("vgId:%d, rsma fetch tasks all finished", SMA_VID(pSma)); + smaDebug("vgId:%d, rsma fetch tasks are all finished", SMA_VID(pSma)); break; } else { - smaDebug("vgId:%d, rsma fetch tasks not all finished yet", SMA_VID(pSma)); + smaDebug("vgId:%d, rsma fetch tasks are not all finished yet", SMA_VID(pSma)); } ++nLoops; if (nLoops > 1000) { @@ -173,7 +172,7 @@ static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat) { TdDirPtr pDir = NULL; TdDirEntryPtr pDirEntry = NULL; char dir[TSDB_FILENAME_LEN]; - const char *pattern = "v[0-9]+qtaskinfo\\.ver([0-9]+)?$"; + const char *pattern = "v[0-9]+qinf\\.v([0-9]+)?$"; regex_t regex; int code = 0; @@ -289,8 +288,7 @@ static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma) { return TSDB_CODE_SUCCESS; } - SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma); - SRSmaStat *pRSmaStat = SMA_RSMA_STAT(SMA_ENV_STAT(pSmaEnv)); + SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma); // cleanup outdated qtaskinfo files tdCleanupQTaskInfoFiles(pSma, pRSmaStat); @@ -299,10 +297,9 @@ static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma) { } /** - * @brief Rsma async commit implementation + * @brief Rsma async commit implementation(only do some necessary light weighted task) * 1) set rsma stat TASK_TRIGGER_STAT_PAUSED * 2) Wait all running fetch task finish to fetch and put submitMsg into level 2/3 wQueue(blocking level 1 write) - * 3) * * @param pSma * @return int32_t @@ -314,20 +311,29 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) { } SSmaStat *pStat = SMA_ENV_STAT(pEnv); - SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat); + SRSmaStat *pRSmaStat = SMA_STAT_RSMA(pStat); + int32_t nLoops = 0; // step 1: set rsma stat atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED); - atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 1); + while (atomic_val_compare_exchange_8(RSMA_COMMIT_STAT(pRSmaStat), 0, 1) != 0) { + ++nLoops; + if (nLoops > 1000) { + sched_yield(); + nLoops = 0; + } + } + pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied; + ASSERT(pRSmaStat->commitAppliedVer > 0); - // step 2: wait all triggered fetch tasks finished - int32_t nLoops = 0; + // step 2: wait for all triggered fetch tasks to finish + nLoops = 0; while (1) { if (T_REF_VAL_GET(pStat) == 0) { - smaDebug("vgId:%d, rsma fetch tasks all finished", SMA_VID(pSma)); + smaDebug("vgId:%d, rsma commit, fetch tasks are all finished", SMA_VID(pSma)); break; } else { - smaDebug("vgId:%d, rsma fetch tasks not all finished yet", SMA_VID(pSma)); + smaDebug("vgId:%d, rsma commit, fetch tasks are not all finished yet", SMA_VID(pSma)); } ++nLoops; if (nLoops > 1000) { @@ -336,29 +342,52 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) { } } - // step 3: swap rsmaInfoHash and iRsmaInfoHash + /** + * @brief step 3: consume the SubmitReq in buffer + * 1) This is high cost task and should not put in asyncPreCommit originally. + * 2) But, if put in asyncCommit, would trigger taskInfo cloning frequently. + */ + if (tdRSmaProcessExecImpl(pSma, RSMA_EXEC_COMMIT) < 0) { + return TSDB_CODE_FAILED; + } + + smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma), + (void *)taosGetSelfPthreadId()); + nLoops = 0; + while (atomic_load_64(&pRSmaStat->nBufItems) > 0) { + ++nLoops; + if (nLoops > 1000) { + sched_yield(); + nLoops = 0; + } + } + smaInfo("vgId:%d, rsma commit, all items are consumed, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId()); + if (tdRSmaPersistExecImpl(pRSmaStat, RSMA_INFO_HASH(pRSmaStat)) < 0) { + return TSDB_CODE_FAILED; + } + smaInfo("vgId:%d, rsma commit, operator state commited, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId()); + +#if 0 // consuming task of qTaskInfo clone + // step 4: swap queue/qall and iQueue/iQall // lock - taosWLockLatch(SMA_ENV_LOCK(pEnv)); + // taosWLockLatch(SMA_ENV_LOCK(pEnv)); ASSERT(RSMA_INFO_HASH(pRSmaStat)); - ASSERT(!RSMA_IMU_INFO_HASH(pRSmaStat)); - RSMA_IMU_INFO_HASH(pRSmaStat) = RSMA_INFO_HASH(pRSmaStat); - RSMA_INFO_HASH(pRSmaStat) = - taosHashInit(RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); + void *pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), NULL); - if (!RSMA_INFO_HASH(pRSmaStat)) { - // unlock - taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); - smaError("vgId:%d, rsma async commit failed since %s", SMA_VID(pSma), terrstr()); - return TSDB_CODE_FAILED; + while (pIter) { + SRSmaInfo *pInfo = *(SRSmaInfo **)pIter; + TSWAP(pInfo->iQall, pInfo->qall); + TSWAP(pInfo->iQueue, pInfo->queue); + TSWAP(pInfo->iTaskInfo[0], pInfo->taskInfo[0]); + TSWAP(pInfo->iTaskInfo[1], pInfo->taskInfo[1]); + pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), pIter); } // unlock - taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); - - // step 4: others - pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied; + // taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); +#endif return TSDB_CODE_SUCCESS; } @@ -374,18 +403,20 @@ static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma) { if (!pSmaEnv) { return TSDB_CODE_SUCCESS; } +#if 0 + SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pSmaEnv); - SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv); - SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat); - - // perform persist task for qTaskInfo - tdRSmaPersistExecImpl(pRSmaStat, RSMA_IMU_INFO_HASH(pRSmaStat)); + // perform persist task for qTaskInfo operator + if (tdRSmaPersistExecImpl(pRSmaStat, RSMA_INFO_HASH(pRSmaStat)) < 0) { + return TSDB_CODE_FAILED; + } +#endif return TSDB_CODE_SUCCESS; } /** - * @brief Migrate rsmaInfo from iRsmaInfo to rsmaInfo if rsmaInfoHash not empty. + * @brief Migrate rsmaInfo from iRsmaInfo to rsmaInfo if rsma infoHash not empty. * * @param pSma * @return int32_t @@ -396,68 +427,66 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) { return TSDB_CODE_SUCCESS; } - SSmaStat *pStat = SMA_ENV_STAT(pEnv); - SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat); + SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pEnv); + SArray *rsmaDeleted = NULL; - // step 1: merge rsmaInfoHash and iRsmaInfoHash + // step 1: merge qTaskInfo and iQTaskInfo // lock - taosWLockLatch(SMA_ENV_LOCK(pEnv)); -#if 0 - if (taosHashGetSize(RSMA_INFO_HASH(pRSmaStat)) <= 0) { - // just switch the hash pointer if rsmaInfoHash is empty - if (taosHashGetSize(RSMA_IMU_INFO_HASH(pRSmaStat)) > 0) { - SHashObj *infoHash = RSMA_INFO_HASH(pRSmaStat); - RSMA_INFO_HASH(pRSmaStat) = RSMA_IMU_INFO_HASH(pRSmaStat); - RSMA_IMU_INFO_HASH(pRSmaStat) = infoHash; - } - } else { -#endif -#if 1 - void *pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), NULL); - while (pIter) { - tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); - - if (!taosHashGet(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t))) { - SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter; - if (RSMA_INFO_IS_DEL(pRSmaInfo)) { - int32_t refVal = T_REF_VAL_GET(pRSmaInfo); - if (refVal == 0) { - tdFreeRSmaInfo(pSma, pRSmaInfo, true); - smaDebug( - "vgId:%d, rsma async post commit, free rsma info since already deleted and ref is 0 for " - "table:%" PRIi64, - SMA_VID(pSma), *pSuid); - } else { - smaDebug( - "vgId:%d, rsma async post commit, not free rsma info since ref is %d although already deleted for " - "table:%" PRIi64, - SMA_VID(pSma), refVal, *pSuid); + // taosWLockLatch(SMA_ENV_LOCK(pEnv)); + + void *pIter = NULL; + while ((pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), pIter))) { + tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); + SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter; + if (RSMA_INFO_IS_DEL(pRSmaInfo)) { + int32_t refVal = T_REF_VAL_GET(pRSmaInfo); + if (refVal == 0) { + if (!rsmaDeleted) { + if ((rsmaDeleted = taosArrayInit(1, sizeof(tb_uid_t)))) { + taosArrayPush(rsmaDeleted, pSuid); + } } + } else { + smaDebug( + "vgId:%d, rsma async post commit, not free rsma info since ref is %d although already deleted for " + "table:%" PRIi64, + SMA_VID(pSma), refVal, *pSuid); + } - pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), pIter); - continue; + continue; + } +#if 0 + if (pRSmaInfo->taskInfo[0]) { + if (pRSmaInfo->iTaskInfo[0]) { + SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pRSmaInfo->iTaskInfo[0]; + tdFreeRSmaInfo(pSma, pRSmaInfo, false); + pRSmaInfo->iTaskInfo[0] = NULL; } - taosHashPut(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t), pIter, sizeof(pIter)); - smaDebug("vgId:%d, rsma async post commit, migrated from iRsmaInfoHash for table:%" PRIi64, SMA_VID(pSma), - *pSuid); } else { - // free the resources - SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter; - tdFreeRSmaInfo(pSma, pRSmaInfo, false); - smaDebug("vgId:%d, rsma async post commit, free rsma info since already COW for table:%" PRIi64, SMA_VID(pSma), - *pSuid); + TSWAP(pRSmaInfo->taskInfo[0], pRSmaInfo->iTaskInfo[0]); } - pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), pIter); - } + taosHashPut(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t), pIter, sizeof(pIter)); + smaDebug("vgId:%d, rsma async post commit, migrated from iRsmaInfoHash for table:%" PRIi64, SMA_VID(pSma), *pSuid); #endif - // } + } - taosHashCleanup(RSMA_IMU_INFO_HASH(pRSmaStat)); - RSMA_IMU_INFO_HASH(pRSmaStat) = NULL; + for (int32_t i = 0; i < taosArrayGetSize(rsmaDeleted); ++i) { + tb_uid_t *pSuid = taosArrayGet(rsmaDeleted, i); + void *pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t)); + if ((pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) { + tdFreeRSmaInfo(pSma, pRSmaInfo, true); + smaDebug( + "vgId:%d, rsma async post commit, free rsma info since already deleted and ref is 0 for " + "table:%" PRIi64, + SMA_VID(pSma), *pSuid); + } + taosHashRemove(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t)); + } + taosArrayDestroy(rsmaDeleted); // unlock - taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); + // taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); // step 2: cleanup outdated qtaskinfo files tdCleanupQTaskInfoFiles(pSma, pRSmaStat); diff --git a/source/dnode/vnode/src/sma/smaEnv.c b/source/dnode/vnode/src/sma/smaEnv.c index ccb6ad3a72c2358772b652d04709d5eebc2fd2fb..32a419022a312f9ab21681b9bc6f819c7792f51e 100644 --- a/source/dnode/vnode/src/sma/smaEnv.c +++ b/source/dnode/vnode/src/sma/smaEnv.c @@ -23,11 +23,13 @@ extern SSmaMgmt smaMgmt; // declaration of static functions -static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pSma); -static SSmaEnv *tdNewSmaEnv(const SSma *pSma, int8_t smaType, const char *path); -static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SSmaEnv **pEnv); -static void *tdFreeTSmaStat(STSmaStat *pStat); -static void tdDestroyRSmaStat(void *pRSmaStat); +static int32_t tdNewSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv); +static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv); +static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pSma); +static int32_t tdRsmaStartExecutor(const SSma *pSma); +static int32_t tdRsmaStopExecutor(const SSma *pSma); +static void *tdFreeTSmaStat(STSmaStat *pStat); +static void tdDestroyRSmaStat(void *pRSmaStat); /** * @brief rsma init @@ -97,35 +99,42 @@ void smaCleanUp() { } } -static SSmaEnv *tdNewSmaEnv(const SSma *pSma, int8_t smaType, const char *path) { +static int32_t tdNewSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv) { SSmaEnv *pEnv = NULL; pEnv = (SSmaEnv *)taosMemoryCalloc(1, sizeof(SSmaEnv)); + *ppEnv = pEnv; if (!pEnv) { terrno = TSDB_CODE_OUT_OF_MEMORY; - return NULL; + return TSDB_CODE_FAILED; } SMA_ENV_TYPE(pEnv) = smaType; taosInitRWLatch(&(pEnv->lock)); + (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&SMA_TSMA_ENV(pSma), *ppEnv) + : atomic_store_ptr(&SMA_RSMA_ENV(pSma), *ppEnv); + if (tdInitSmaStat(&SMA_ENV_STAT(pEnv), smaType, pSma) != TSDB_CODE_SUCCESS) { tdFreeSmaEnv(pEnv); - return NULL; + *ppEnv = NULL; + (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&SMA_TSMA_ENV(pSma), NULL) + : atomic_store_ptr(&SMA_RSMA_ENV(pSma), NULL); + return TSDB_CODE_FAILED; } - return pEnv; + return TSDB_CODE_SUCCESS; } -static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SSmaEnv **pEnv) { - if (!pEnv) { +static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv) { + if (!ppEnv) { terrno = TSDB_CODE_INVALID_PTR; return TSDB_CODE_FAILED; } - if (!(*pEnv)) { - if (!(*pEnv = tdNewSmaEnv(pSma, smaType, path))) { + if (!(*ppEnv)) { + if (tdNewSmaEnv(pSma, smaType, ppEnv) != TSDB_CODE_SUCCESS) { return TSDB_CODE_FAILED; } } @@ -171,7 +180,7 @@ int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat) { int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) { if (!pRSmaInfo) return 0; - + int ref = T_REF_INC(pRSmaInfo); smaDebug("vgId:%d, ref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref); return 0; @@ -199,7 +208,7 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS * tdInitSmaStat invoked in other multithread environment later. */ if (!(*pSmaStat)) { - *pSmaStat = (SSmaStat *)taosMemoryCalloc(1, sizeof(SSmaStat)); + *pSmaStat = (SSmaStat *)taosMemoryCalloc(1, sizeof(SSmaStat) + sizeof(TdThread) * tsNumOfVnodeRsmaThreads); if (!(*pSmaStat)) { terrno = TSDB_CODE_OUT_OF_MEMORY; return TSDB_CODE_FAILED; @@ -209,6 +218,7 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS SRSmaStat *pRSmaStat = (SRSmaStat *)(*pSmaStat); pRSmaStat->pSma = (SSma *)pSma; atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_INIT); + tsem_init(&pRSmaStat->notEmpty, 0, 0); // init smaMgmt smaInit(); @@ -228,7 +238,10 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS RSMA_INFO_HASH(pRSmaStat) = taosHashInit( RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); if (!RSMA_INFO_HASH(pRSmaStat)) { - taosMemoryFreeClear(*pSmaStat); + return TSDB_CODE_FAILED; + } + + if (tdRsmaStartExecutor(pSma) < 0) { return TSDB_CODE_FAILED; } } else if (smaType == TSDB_SMA_TYPE_TIME_RANGE) { @@ -262,10 +275,9 @@ static void tdDestroyRSmaStat(void *pRSmaStat) { smaDebug("vgId:%d, destroy rsma stat %p", SMA_VID(pSma), pRSmaStat); // step 1: set rsma trigger stat cancelled atomic_store_8(RSMA_TRIGGER_STAT(pStat), TASK_TRIGGER_STAT_CANCELLED); + tsem_destroy(&(pStat->notEmpty)); // step 2: destroy the rsma info and associated fetch tasks - // TODO: use taosHashSetFreeFp when taosHashSetFreeFp is ready. -#if 1 if (taosHashGetSize(RSMA_INFO_HASH(pStat)) > 0) { void *infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), NULL); while (infoHash) { @@ -274,17 +286,16 @@ static void tdDestroyRSmaStat(void *pRSmaStat) { infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), infoHash); } } -#endif taosHashCleanup(RSMA_INFO_HASH(pStat)); - // step 3: wait all triggered fetch tasks finished + // step 3: wait for all triggered fetch tasks to finish int32_t nLoops = 0; while (1) { if (T_REF_VAL_GET((SSmaStat *)pStat) == 0) { - smaDebug("vgId:%d, rsma fetch tasks all finished", SMA_VID(pSma)); + smaDebug("vgId:%d, rsma fetch tasks are all finished", SMA_VID(pSma)); break; } else { - smaDebug("vgId:%d, rsma fetch tasks not all finished yet", SMA_VID(pSma)); + smaDebug("vgId:%d, rsma fetch tasks are not all finished yet", SMA_VID(pSma)); } ++nLoops; if (nLoops > 1000) { @@ -293,7 +304,10 @@ static void tdDestroyRSmaStat(void *pRSmaStat) { } } - // step 4: free pStat + // step 4: + tdRsmaStopExecutor(pSma); + + // step 5: free pStat taosMemoryFreeClear(pStat); } } @@ -318,9 +332,9 @@ void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType) { int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) { if (pSmaStat) { if (smaType == TSDB_SMA_TYPE_TIME_RANGE) { - tdDestroyTSmaStat(SMA_TSMA_STAT(pSmaStat)); + tdDestroyTSmaStat(SMA_STAT_TSMA(pSmaStat)); } else if (smaType == TSDB_SMA_TYPE_ROLLUP) { - SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSmaStat); + SRSmaStat *pRSmaStat = &pSmaStat->rsmaStat; int32_t vid = SMA_VID(pRSmaStat->pSma); int64_t refId = RSMA_REF_ID(pRSmaStat); if (taosRemoveRef(smaMgmt.rsetId, RSMA_REF_ID(pRSmaStat)) < 0) { @@ -383,17 +397,70 @@ int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType) { pEnv = (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_load_ptr(&SMA_TSMA_ENV(pSma)) : atomic_load_ptr(&SMA_RSMA_ENV(pSma)); if (!pEnv) { - char rname[TSDB_FILENAME_LEN] = {0}; - - if (tdInitSmaEnv(pSma, smaType, rname, &pEnv) < 0) { + if (tdInitSmaEnv(pSma, smaType, &pEnv) < 0) { tdUnLockSma(pSma); return TSDB_CODE_FAILED; } - - (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&SMA_TSMA_ENV(pSma), pEnv) - : atomic_store_ptr(&SMA_RSMA_ENV(pSma), pEnv); } tdUnLockSma(pSma); return TSDB_CODE_SUCCESS; }; + +void *tdRSmaExecutorFunc(void *param) { + setThreadName("vnode-rsma"); + + tdRSmaProcessExecImpl((SSma *)param, RSMA_EXEC_OVERFLOW); + return NULL; +} + +static int32_t tdRsmaStartExecutor(const SSma *pSma) { + TdThreadAttr thAttr = {0}; + taosThreadAttrInit(&thAttr); + taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE); + + SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); + SSmaStat *pStat = SMA_ENV_STAT(pEnv); + TdThread *pthread = (TdThread *)&pStat->data; + + for (int32_t i = 0; i < tsNumOfVnodeRsmaThreads; ++i) { + if (taosThreadCreate(&pthread[i], &thAttr, tdRSmaExecutorFunc, (void *)pSma) != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + smaError("vgId:%d, failed to create pthread for rsma since %s", SMA_VID(pSma), terrstr()); + return -1; + } + smaDebug("vgId:%d, success to create pthread for rsma", SMA_VID(pSma)); + } + + taosThreadAttrDestroy(&thAttr); + return 0; +} + +static int32_t tdRsmaStopExecutor(const SSma *pSma) { + if (pSma && VND_IS_RSMA(pSma->pVnode)) { + SSmaEnv *pEnv = NULL; + SSmaStat *pStat = NULL; + SRSmaStat *pRSmaStat = NULL; + TdThread *pthread = NULL; + + if (!(pEnv = SMA_RSMA_ENV(pSma)) || !(pStat = SMA_ENV_STAT(pEnv))) { + return 0; + } + + pEnv->flag |= SMA_ENV_FLG_CLOSE; + pRSmaStat = (SRSmaStat *)pStat; + pthread = (TdThread *)&pStat->data; + + for (int32_t i = 0; i < tsNumOfVnodeRsmaThreads; ++i) { + tsem_post(&(pRSmaStat->notEmpty)); + } + + for (int32_t i = 0; i < tsNumOfVnodeRsmaThreads; ++i) { + if (taosCheckPthreadValid(pthread[i])) { + smaDebug("vgId:%d, start to join pthread for rsma:%" PRId64, SMA_VID(pSma), pthread[i]); + taosThreadJoin(pthread[i], NULL); + } + } + } + return 0; +} \ No newline at end of file diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index b7a2efd4897ffca43e2a7e8b25c3ca3f897c5f03..f33d8dc2d07911cebb2a51e44cb02debe530d24d 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -15,16 +15,21 @@ #include "sma.h" -#define RSMA_QTASKINFO_BUFSIZE 32768 -#define RSMA_QTASKINFO_HEAD_LEN (sizeof(int32_t) + sizeof(int8_t) + sizeof(int64_t)) // len + type + suid +#define RSMA_QTASKINFO_BUFSIZE (32768) // size +#define RSMA_QTASKINFO_HEAD_LEN (sizeof(int32_t) + sizeof(int8_t) + sizeof(int64_t)) // len + type + suid +#define RSMA_QTASKEXEC_SMOOTH_SIZE (100) // cnt +#define RSMA_SUBMIT_BATCH_SIZE (1024) // cnt +#define RSMA_FETCH_DELAY_MAX (900000) // ms +#define RSMA_FETCH_ACTIVE_MAX (1800) // ms +#define RSMA_FETCH_INTERVAL (5000) // ms SSmaMgmt smaMgmt = { .inited = 0, .rsetId = -1, }; -#define TD_QTASKINFO_FNAME_PREFIX "qtaskinfo.ver" -#define TD_RSMAINFO_DEL_FILE "rsmainfo.del" +#define TD_QTASKINFO_FNAME_PREFIX "qinf.v" + typedef struct SRSmaQTaskInfoItem SRSmaQTaskInfoItem; typedef struct SRSmaQTaskInfoIter SRSmaQTaskInfoIter; @@ -32,14 +37,15 @@ static int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *ui static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids); static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat *pStat, SRSmaInfo *pRSmaInfo, int8_t idx); -static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, SRSmaInfo *pInfo, tb_uid_t suid, - int8_t level); +static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSize, int32_t inputType, SRSmaInfo *pInfo, + ERsmaExecType type, int8_t level); static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid); static void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo); -static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, - int64_t suid, int8_t blkType); +static void tdFreeRSmaSubmitItems(SArray *pItems); +static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo, SArray *pSubmitArr); +static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, + int64_t suid); static void tdRSmaFetchTrigger(void *param, void *tmrId); -static int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level); static int32_t tdRSmaQTaskInfoIterInit(SRSmaQTaskInfoIter *pIter, STFile *pTFile); static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isFinish); static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, int8_t type, SRSmaQTaskInfoIter *pIter); @@ -139,6 +145,18 @@ void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree) { if (isDeepFree) { taosMemoryFreeClear(pInfo->pTSchema); } + + if (isDeepFree) { + if (pInfo->queue) taosCloseQueue(pInfo->queue); + if (pInfo->qall) taosFreeQall(pInfo->qall); + if (pInfo->iQueue) taosCloseQueue(pInfo->iQueue); + if (pInfo->iQall) taosFreeQall(pInfo->iQall); + pInfo->queue = NULL; + pInfo->qall = NULL; + pInfo->iQueue = NULL; + pInfo->iQall = NULL; + } + taosMemoryFree(pInfo); } @@ -179,7 +197,7 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids) for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) { if (pRSmaInfo->taskInfo[i]) { - if ((qUpdateQualifiedTableId(pRSmaInfo->taskInfo[i], tbUids, true) < 0)) { + if (((terrno = qUpdateQualifiedTableId(pRSmaInfo->taskInfo[i], tbUids, true)) < 0)) { tdReleaseRSmaInfo(pSma, pRSmaInfo); smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " level %d since %s", SMA_VID(pSma), *suid, i, terrstr()); @@ -351,6 +369,19 @@ int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, con goto _err; } pRSmaInfo->pTSchema = pTSchema; + if (!(pRSmaInfo->queue = taosOpenQueue())) { + goto _err; + } + + if (!(pRSmaInfo->qall = taosAllocateQall())) { + goto _err; + } + if (!(pRSmaInfo->iQueue = taosOpenQueue())) { + goto _err; + } + if (!(pRSmaInfo->iQall = taosAllocateQall())) { + goto _err; + } pRSmaInfo->suid = suid; pRSmaInfo->refId = RSMA_REF_ID(pStat); T_REF_INIT_VAL(pRSmaInfo, 1); @@ -419,8 +450,7 @@ int32_t tdProcessRSmaDrop(SSma *pSma, SVDropStbReq *pReq) { return TSDB_CODE_SUCCESS; } - SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv); - SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat); + SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pSmaEnv); SRSmaInfo *pRSmaInfo = tdAcquireRSmaInfoBySuid(pSma, pReq->suid); @@ -528,6 +558,14 @@ void *tdUidStoreFree(STbUidStore *pStore) { return NULL; } +/** + * @brief The SubmitReq for rsma L2/L3 is inserted by tsdbInsertData method directly while not by WriteQ, as the queue + * would be freed when close Vnode, thus lock should be used if with race condition. + * @param pTsdb + * @param version + * @param pReq + * @return int32_t + */ static int32_t tdProcessSubmitReq(STsdb *pTsdb, int64_t version, void *pReq) { if (!pReq) { terrno = TSDB_CODE_INVALID_PTR; @@ -535,7 +573,7 @@ static int32_t tdProcessSubmitReq(STsdb *pTsdb, int64_t version, void *pReq) { } SSubmitReq *pSubmitReq = (SSubmitReq *)pReq; - + // TODO: spin lock for race conditiond if (tsdbInsertData(pTsdb, version, pSubmitReq, NULL) < 0) { return TSDB_CODE_FAILED; } @@ -569,17 +607,6 @@ static int32_t tdFetchSubmitReqSuids(SSubmitReq *pMsg, STbUidStore *pStore) { return 0; } -static void tdDestroySDataBlockArray(SArray *pArray) { - // TODO -#if 0 - for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) { - SSDataBlock *pDataBlock = taosArrayGet(pArray, i); - blockDestroyInner(pDataBlock); - } -#endif - taosArrayDestroy(pArray); -} - /** * @brief retention of rsma1/rsma2 * @@ -589,7 +616,7 @@ static void tdDestroySDataBlockArray(SArray *pArray) { */ int32_t smaDoRetention(SSma *pSma, int64_t now) { int32_t code = TSDB_CODE_SUCCESS; - if (VND_IS_RSMA(pSma->pVnode)) { + if (!VND_IS_RSMA(pSma->pVnode)) { return code; } @@ -604,8 +631,8 @@ _end: return code; } -static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, - int64_t suid, int8_t blkType) { +static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, + int64_t suid) { SArray *pResList = taosArrayInit(1, POINTER_BYTES); if (pResList == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -615,7 +642,7 @@ static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSm while (1) { uint64_t ts; int32_t code = qExecTaskOpt(taskInfo, pResList, &ts); - if (code < 0) { + if (code < 0) { if (code == TSDB_CODE_QRY_IN_EXEC) { break; } else { @@ -637,8 +664,7 @@ static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSm } else { smaDebug("vgId:%d, rsma %" PRIi8 " data fetched", SMA_VID(pSma), pItem->level); } - -#if 1 +#if 0 char flag[10] = {0}; snprintf(flag, 10, "level %" PRIi8, pItem->level); blockDebugShowDataBlocks(pResList, flag); @@ -662,10 +688,9 @@ static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSm goto _err; } taosMemoryFreeClear(pReq); - + smaDebug("vgId:%d, process submit req for rsma table %" PRIi64 " level %" PRIi8 " version:%" PRIi64, SMA_VID(pSma), suid, pItem->level, output->info.version); - } } @@ -677,34 +702,117 @@ _err: return TSDB_CODE_FAILED; } -static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, SRSmaInfo *pInfo, tb_uid_t suid, - int8_t level) { +/** + * @brief Copy msg to rsmaQueueBuffer for batch process + * + * @param pSma + * @param pMsg + * @param inputType + * @param pInfo + * @param suid + * @return int32_t + */ +static int32_t tdExecuteRSmaImplAsync(SSma *pSma, const void *pMsg, int32_t inputType, SRSmaInfo *pInfo, + tb_uid_t suid) { + const SSubmitReq *pReq = (const SSubmitReq *)pMsg; + + void *qItem = taosAllocateQitem(pReq->header.contLen, DEF_QITEM); + if (!qItem) { + return TSDB_CODE_FAILED; + } + + memcpy(qItem, pMsg, pReq->header.contLen); + + taosWriteQitem(pInfo->queue, qItem); + + pInfo->lastRecv = taosGetTimestampMs(); + + SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma); + + int64_t nItems = atomic_fetch_add_64(&pRSmaStat->nBufItems, 1); + + if (atomic_load_8(&pInfo->assigned) == 0) { + tsem_post(&(pRSmaStat->notEmpty)); + } + + // smoothing consume + int32_t n = nItems / RSMA_QTASKEXEC_SMOOTH_SIZE; + if (n > 1) { + if (n > 10) { + n = 10; + } + taosMsleep(n << 3); + if (n > 5) { + smaWarn("vgId:%d, pInfo->queue itemSize:%d, memSize:%" PRIi64 ", sleep %d ms", SMA_VID(pSma), + taosQueueItemSize(pInfo->queue), taosQueueMemorySize(pInfo->queue), n << 3); + } + } + + return TSDB_CODE_SUCCESS; +} + +static int32_t tdRsmaPrintSubmitReq(SSma *pSma, SSubmitReq *pReq) { + SSubmitMsgIter msgIter = {0}; + SSubmitBlkIter blkIter = {0}; + STSRow *row = NULL; + if (tInitSubmitMsgIter(pReq, &msgIter) < 0) return -1; + while (true) { + SSubmitBlk *pBlock = NULL; + if (tGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1; + if (pBlock == NULL) break; + tInitSubmitBlkIter(&msgIter, pBlock, &blkIter); + while ((row = tGetSubmitBlkNext(&blkIter)) != NULL) { + smaDebug("vgId:%d, numOfRows:%d, suid:%" PRIi64 ", uid:%" PRIi64 ", version:%" PRIi64 ", ts:%" PRIi64, + SMA_VID(pSma), msgIter.numOfRows, msgIter.suid, msgIter.uid, pReq->version, row->ts); + } + } + return 0; +} + +/** + * @brief sync mode + * + * @param pSma + * @param pMsg + * @param msgSize + * @param inputType + * @param pInfo + * @param type + * @param level + * @return int32_t + */ +static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSize, int32_t inputType, SRSmaInfo *pInfo, + ERsmaExecType type, int8_t level) { int32_t idx = level - 1; - if (!pInfo || !RSMA_INFO_QTASK(pInfo, idx)) { - smaDebug("vgId:%d, no qTaskInfo to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, suid); + + void *qTaskInfo = (type == RSMA_EXEC_COMMIT) ? RSMA_INFO_IQTASK(pInfo, idx) : RSMA_INFO_QTASK(pInfo, idx); + if (!qTaskInfo) { + smaDebug("vgId:%d, no qTaskInfo to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, + pInfo->suid); return TSDB_CODE_SUCCESS; } if (!pInfo->pTSchema) { - smaWarn("vgId:%d, no schema to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, suid); + smaWarn("vgId:%d, no schema to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, pInfo->suid); return TSDB_CODE_FAILED; } smaDebug("vgId:%d, execute rsma %" PRIi8 " task for qTaskInfo:%p suid:%" PRIu64, SMA_VID(pSma), level, - RSMA_INFO_QTASK(pInfo, idx), suid); + RSMA_INFO_QTASK(pInfo, idx), pInfo->suid); - if (qSetMultiStreamInput(RSMA_INFO_QTASK(pInfo, idx), pMsg, 1, inputType) < 0) { // INPUT__DATA_SUBMIT +#if 0 + for (int32_t i = 0; i < msgSize; ++i) { + SSubmitReq *pReq = *(SSubmitReq **)((char *)pMsg + i * sizeof(void *)); + smaDebug("vgId:%d, [%d][%d] version %" PRIi64, SMA_VID(pSma), msgSize, i, pReq->version); + tdRsmaPrintSubmitReq(pSma, pReq); + } +#endif + if (qSetMultiStreamInput(qTaskInfo, pMsg, msgSize, inputType) < 0) { smaError("vgId:%d, rsma %" PRIi8 " qSetStreamInput failed since %s", SMA_VID(pSma), level, tstrerror(terrno)); return TSDB_CODE_FAILED; } SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, idx); - tdRSmaFetchAndSubmitResult(pSma, RSMA_INFO_QTASK(pInfo, idx), pItem, pInfo->pTSchema, suid, - STREAM_INPUT__DATA_SUBMIT); - atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE); - - if (smaMgmt.tmrHandle) { - taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay, pItem, smaMgmt.tmrHandle, &pItem->tmrId); - } + tdRSmaExecAndSubmitResult(pSma, qTaskInfo, pItem, pInfo->pTSchema, pInfo->suid); return TSDB_CODE_SUCCESS; } @@ -732,58 +840,27 @@ static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) { return NULL; } - taosRLockLatch(SMA_ENV_LOCK(pEnv)); + // taosRLockLatch(SMA_ENV_LOCK(pEnv)); pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)); if (pRSmaInfo && (pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) { if (RSMA_INFO_IS_DEL(pRSmaInfo)) { - taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); + // taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); return NULL; } - tdRefRSmaInfo(pSma, pRSmaInfo); - taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); - return pRSmaInfo; - } - taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); - - if (RSMA_COMMIT_STAT(pStat) == 0) { // return NULL if not in committing stat - return NULL; - } - - // clone the SRSmaInfo from iRsmaInfoHash to rsmaInfoHash if in committing stat - SRSmaInfo *pCowRSmaInfo = NULL; - // lock - taosWLockLatch(SMA_ENV_LOCK(pEnv)); - if (!(pCowRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)))) { // 2-phase lock - void *iRSmaInfo = taosHashGet(RSMA_IMU_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)); - if (iRSmaInfo) { - SRSmaInfo *pIRSmaInfo = *(SRSmaInfo **)iRSmaInfo; - if (pIRSmaInfo && !RSMA_INFO_IS_DEL(pIRSmaInfo)) { - if (tdCloneRSmaInfo(pSma, &pCowRSmaInfo, pIRSmaInfo) < 0) { - // unlock - taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); - smaError("vgId:%d, clone rsma info failed for suid:%" PRIu64 " since %s", SMA_VID(pSma), suid, terrstr()); - return NULL; - } - smaDebug("vgId:%d, clone rsma info succeed for suid:%" PRIu64, SMA_VID(pSma), suid); - if (taosHashPut(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t), &pCowRSmaInfo, sizeof(pCowRSmaInfo)) < 0) { - // unlock - taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); - smaError("vgId:%d, clone rsma info failed for suid:%" PRIu64 " since %s", SMA_VID(pSma), suid, terrstr()); - return NULL; - } + if (!pRSmaInfo->taskInfo[0]) { + if (tdCloneRSmaInfo(pSma, pRSmaInfo) < 0) { + // taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); + return NULL; } } - } else { - pCowRSmaInfo = *(SRSmaInfo **)pCowRSmaInfo; - ASSERT(!pCowRSmaInfo); + tdRefRSmaInfo(pSma, pRSmaInfo); + // taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); + ASSERT(pRSmaInfo->suid == suid); + return pRSmaInfo; } + // taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); - if (pCowRSmaInfo) { - tdRefRSmaInfo(pSma, pCowRSmaInfo); - } - // unlock - taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); - return pCowRSmaInfo; + return NULL; } static FORCE_INLINE void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) { @@ -792,16 +869,39 @@ static FORCE_INLINE void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) { } } -static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) { +/** + * @brief async mode + * + * @param pSma + * @param pMsg + * @param inputType + * @param suid + * @return int32_t + */ +static int32_t tdExecuteRSmaAsync(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) { SRSmaInfo *pRSmaInfo = tdAcquireRSmaInfoBySuid(pSma, suid); if (!pRSmaInfo) { - smaError("vgId:%d, execute rsma, no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid); + smaDebug("vgId:%d, execute rsma, no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid); return TSDB_CODE_SUCCESS; } if (inputType == STREAM_INPUT__DATA_SUBMIT) { - tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo, suid, TSDB_RETENTION_L1); - tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo, suid, TSDB_RETENTION_L2); + if (tdExecuteRSmaImplAsync(pSma, pMsg, inputType, pRSmaInfo, suid) < 0) { + tdReleaseRSmaInfo(pSma, pRSmaInfo); + return TSDB_CODE_FAILED; + } + if (smaMgmt.tmrHandle) { + SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pRSmaInfo, 0); + if (pItem->level > 0) { + atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE); + } + pItem = RSMA_INFO_ITEM(pRSmaInfo, 1); + if (pItem->level > 0) { + atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE); + } + } + } else { + ASSERT(0); } tdReleaseRSmaInfo(pSma, pRSmaInfo); @@ -814,7 +914,7 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) { // only applicable when rsma env exists return TSDB_CODE_SUCCESS; } - + STbUidStore uidStore = {0}; SRetention *pRetention = SMA_RETENTION(pSma); if (!RETENTION_VALID(pRetention + 1)) { // return directly if retention level 1 is invalid @@ -822,49 +922,71 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) { } if (inputType == STREAM_INPUT__DATA_SUBMIT) { - STbUidStore uidStore = {0}; - tdFetchSubmitReqSuids(pMsg, &uidStore); + if (tdFetchSubmitReqSuids(pMsg, &uidStore) < 0) { + goto _err; + } if (uidStore.suid != 0) { - tdExecuteRSma(pSma, pMsg, inputType, uidStore.suid); + if (tdExecuteRSmaAsync(pSma, pMsg, inputType, uidStore.suid) < 0) { + goto _err; + } - void *pIter = taosHashIterate(uidStore.uidHash, NULL); - while (pIter) { + void *pIter = NULL; + while ((pIter = taosHashIterate(uidStore.uidHash, pIter))) { tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); - tdExecuteRSma(pSma, pMsg, inputType, *pTbSuid); - pIter = taosHashIterate(uidStore.uidHash, pIter); + if (tdExecuteRSmaAsync(pSma, pMsg, inputType, *pTbSuid) < 0) { + goto _err; + } } - - tdUidStoreDestory(&uidStore); } } + tdUidStoreDestory(&uidStore); return TSDB_CODE_SUCCESS; +_err: + tdUidStoreDestory(&uidStore); + smaError("vgId:%d, failed to process rsma submit since: %s", SMA_VID(pSma), terrstr()); + return TSDB_CODE_FAILED; } +/** + * @brief retrieve rsma meta and init + * + * @param pSma + * @param nTables number of tables of rsma + * @return int32_t + */ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) { - SVnode *pVnode = pSma->pVnode; + SVnode *pVnode = pSma->pVnode; + SArray *suidList = NULL; + STbUidStore uidStore = {0}; + SMetaReader mr = {0}; - SArray *suidList = taosArrayInit(1, sizeof(tb_uid_t)); - if (tsdbGetStbIdList(SMA_META(pSma), 0, suidList) < 0) { - taosArrayDestroy(suidList); + if (!(suidList = taosArrayInit(1, sizeof(tb_uid_t)))) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + + if (vnodeGetStbIdList(pSma->pVnode, 0, suidList) < 0) { smaError("vgId:%d, failed to restore rsma env since get stb id list error: %s", TD_VID(pVnode), terrstr()); - return TSDB_CODE_FAILED; + goto _err; } int64_t arrSize = taosArrayGetSize(suidList); - if (nTables) { - *nTables = arrSize; - } - if (arrSize == 0) { + if (nTables) { + *nTables = 0; + } taosArrayDestroy(suidList); smaDebug("vgId:%d, no need to restore rsma env since empty stb id list", TD_VID(pVnode)); return TSDB_CODE_SUCCESS; } - SMetaReader mr = {0}; + int64_t nRsmaTables = 0; metaReaderInit(&mr, SMA_META(pSma), 0); + if (!(uidStore.tbUids = taosArrayInit(1024, sizeof(tb_uid_t)))) { + goto _err; + } for (int64_t i = 0; i < arrSize; ++i) { tb_uid_t suid = *(tb_uid_t *)taosArrayGet(suidList, i); smaDebug("vgId:%d, rsma restore, suid is %" PRIi64, TD_VID(pVnode), suid); @@ -877,6 +999,7 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) { ASSERT(mr.me.type == TSDB_SUPER_TABLE); ASSERT(mr.me.uid == suid); if (TABLE_IS_ROLLUP(mr.me.flags)) { + ++nRsmaTables; SRSmaParam *param = &mr.me.stbEntry.rsmaParam; for (int i = 0; i < TSDB_RETENTION_L2; ++i) { smaDebug("vgId:%d, rsma restore, table:%" PRIi64 " level:%d, maxdelay:%" PRIi64 " watermark:%" PRIi64 @@ -887,17 +1010,40 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) { smaError("vgId:%d, rsma restore env failed for %" PRIi64 " since %s", TD_VID(pVnode), suid, terrstr()); goto _err; } + + // reload all ctbUids for suid + uidStore.suid = suid; + if (vnodeGetCtbIdList(pVnode, suid, uidStore.tbUids) < 0) { + smaError("vgId:%d, rsma restore, get ctb idlist failed for %" PRIi64 " since %s", TD_VID(pVnode), suid, + terrstr()); + goto _err; + } + + if (tdUpdateTbUidList(pVnode->pSma, &uidStore) < 0) { + smaError("vgId:%d, rsma restore, update tb uid list failed for %" PRIi64 " since %s", TD_VID(pVnode), suid, + terrstr()); + goto _err; + } + + taosArrayClear(uidStore.tbUids); + smaDebug("vgId:%d, rsma restore env success for %" PRIi64, TD_VID(pVnode), suid); } } metaReaderClear(&mr); taosArrayDestroy(suidList); + tdUidStoreDestory(&uidStore); + + if (nTables) { + *nTables = nRsmaTables; + } return TSDB_CODE_SUCCESS; _err: metaReaderClear(&mr); taosArrayDestroy(suidList); + tdUidStoreDestory(&uidStore); return TSDB_CODE_FAILED; } @@ -1230,6 +1376,9 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) { } for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) { +#if 0 + qTaskInfo_t taskInfo = RSMA_INFO_IQTASK(pRSmaInfo, i); +#endif qTaskInfo_t taskInfo = RSMA_INFO_QTASK(pRSmaInfo, i); if (!taskInfo) { smaDebug("vgId:%d, rsma, table %" PRIi64 " level %d qTaskInfo is NULL", vid, pRSmaInfo->suid, i + 1); @@ -1347,13 +1496,13 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) { switch (rsmaTriggerStat) { case TASK_TRIGGER_STAT_PAUSED: case TASK_TRIGGER_STAT_CANCELLED: { - tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId); smaDebug("vgId:%d, rsma fetch task not start for level %" PRIi8 " since stat is %" PRIi8 ", rsetId rsetId:%" PRIi64 " refId:%d", SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pRSmaInfo->refId); if (rsmaTriggerStat == TASK_TRIGGER_STAT_PAUSED) { - taosTmrReset(tdRSmaFetchTrigger, 5000, pItem, smaMgmt.tmrHandle, &pItem->tmrId); + taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId); } + tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId); return; } default: @@ -1364,10 +1513,19 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) { atomic_val_compare_exchange_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE, TASK_TRIGGER_STAT_INACTIVE); switch (fetchTriggerStat) { case TASK_TRIGGER_STAT_ACTIVE: { - smaDebug("vgId:%d, rsma fetch task started for level:%" PRIi8 " suid:%" PRIi64 " since stat is active", + smaDebug("vgId:%d, rsma fetch task planned for level:%" PRIi8 " suid:%" PRIi64 " since stat is active", SMA_VID(pSma), pItem->level, pRSmaInfo->suid); // async process - tdRSmaFetchSend(pSma, pRSmaInfo, pItem->level); + pItem->fetchLevel = pItem->level; +#if 0 + SRSmaInfo *qInfo = tdAcquireRSmaInfoBySuid(pSma, pRSmaInfo->suid); + SRSmaInfoItem *qItem = RSMA_INFO_ITEM(qInfo, pItem->level - 1); + ASSERT(qItem->level == pItem->level); + ASSERT(qItem->fetchLevel == pItem->fetchLevel); +#endif + if (atomic_load_8(&pRSmaInfo->assigned) == 0) { + tsem_post(&(pStat->notEmpty)); + } } break; case TASK_TRIGGER_STAT_PAUSED: { smaDebug("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is paused", @@ -1382,127 +1540,265 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) { SMA_VID(pSma), pItem->level, pRSmaInfo->suid); } break; default: { - smaWarn("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is unknown", - SMA_VID(pSma), pItem->level, pRSmaInfo->suid); + smaDebug("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is unknown", + SMA_VID(pSma), pItem->level, pRSmaInfo->suid); } break; } _end: - // taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay, pItem, smaMgmt.tmrHandle, &pItem->tmrId); + taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay, pItem, smaMgmt.tmrHandle, &pItem->tmrId); tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId); } +static void tdFreeRSmaSubmitItems(SArray *pItems) { + ASSERT(taosArrayGetSize(pItems) > 0); + for (int32_t i = 0; i < taosArrayGetSize(pItems); ++i) { + taosFreeQitem(*(void **)taosArrayGet(pItems, i)); + } + taosArrayClear(pItems); +} + /** - * @brief put rsma fetch msg to fetch queue + * @brief fetch rsma result(consider the efficiency and functionality) * * @param pSma * @param pInfo - * @param level + * @param pSubmitArr * @return int32_t */ -int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) { - SRSmaFetchMsg fetchMsg = {.suid = pInfo->suid, .level = level}; - int32_t ret = 0; - int32_t contLen = 0; - SEncoder encoder = {0}; - tEncodeSize(tEncodeSRSmaFetchMsg, &fetchMsg, contLen, ret); - if (ret < 0) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - tEncoderClear(&encoder); - goto _err; - } +static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo, SArray *pSubmitArr) { + SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL}; + for (int8_t i = 1; i <= TSDB_RETENTION_L2; ++i) { + SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, i - 1); + if (pItem->fetchLevel) { + pItem->fetchLevel = 0; + qTaskInfo_t taskInfo = RSMA_INFO_QTASK(pInfo, i - 1); + if (!taskInfo) { + continue; + } - void *pBuf = rpcMallocCont(contLen + sizeof(SMsgHead)); - tEncoderInit(&encoder, POINTER_SHIFT(pBuf, sizeof(SMsgHead)), contLen); - if (tEncodeSRSmaFetchMsg(&encoder, &fetchMsg) < 0) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - tEncoderClear(&encoder); - } - tEncoderClear(&encoder); + int64_t curMs = taosGetTimestampMs(); + if ((pItem->nSkipped * pItem->maxDelay) > RSMA_FETCH_DELAY_MAX) { + smaInfo("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nSkipped:%" PRIi8 " maxDelay:%d, fetch executed", + SMA_VID(pSma), pInfo->suid, i, pItem->nSkipped, pItem->maxDelay); + } else if (((curMs - pInfo->lastRecv) < RSMA_FETCH_ACTIVE_MAX)) { + ++pItem->nSkipped; + smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " curMs:%" PRIi64 " lastRecv:%" PRIi64 ", fetch skipped ", + SMA_VID(pSma), pInfo->suid, i, curMs, pInfo->lastRecv); + continue; + } else { + smaInfo("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " curMs:%" PRIi64 " lastRecv:%" PRIi64 ", fetch executed ", + SMA_VID(pSma), pInfo->suid, i, curMs, pInfo->lastRecv); + } - ((SMsgHead *)pBuf)->vgId = SMA_VID(pSma); - ((SMsgHead *)pBuf)->contLen = contLen + sizeof(SMsgHead); + pItem->nSkipped = 0; - SRpcMsg rpcMsg = { - .code = 0, - .msgType = TDMT_VND_FETCH_RSMA, - .pCont = pBuf, - .contLen = contLen, - }; + if ((terrno = qSetMultiStreamInput(taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK)) < 0) { + goto _err; + } + if (tdRSmaExecAndSubmitResult(pSma, taskInfo, pItem, pInfo->pTSchema, pInfo->suid) < 0) { + tdCleanupStreamInputDataBlock(taskInfo); + goto _err; + } - if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, QUERY_QUEUE, &rpcMsg)) != 0) { - smaError("vgId:%d, failed to put rsma fetch msg into fetch-queue for suid:%" PRIi64 " level:%" PRIi8 " since %s", - SMA_VID(pSma), pInfo->suid, level, terrstr()); - goto _err; + tdCleanupStreamInputDataBlock(taskInfo); + smaInfo("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nSkipped:%" PRIi8 " maxDelay:%d, fetch finished", + SMA_VID(pSma), pInfo->suid, i, pItem->nSkipped, pItem->maxDelay); + } else { + smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nSkipped:%" PRIi8 + " maxDelay:%d, fetch not executed as fetch level is %" PRIi8, + SMA_VID(pSma), pInfo->suid, i, pItem->nSkipped, pItem->maxDelay, pItem->fetchLevel); + } } - smaDebug("vgId:%d, success to put rsma fetch msg into fetch-queue for suid:%" PRIi64 " level:%" PRIi8, SMA_VID(pSma), - pInfo->suid, level); +_end: + tdReleaseRSmaInfo(pSma, pInfo); + return TSDB_CODE_SUCCESS; +_err: + tdReleaseRSmaInfo(pSma, pInfo); + return TSDB_CODE_FAILED; +} + +static int32_t tdRSmaBatchExec(SSma *pSma, SRSmaInfo *pInfo, STaosQall *qall, SArray *pSubmitArr, ERsmaExecType type) { + taosArrayClear(pSubmitArr); + while (1) { + void *msg = NULL; + taosGetQitem(qall, (void **)&msg); + if (msg) { + if (taosArrayPush(pSubmitArr, &msg) < 0) { + tdFreeRSmaSubmitItems(pSubmitArr); + goto _err; + } + } else { + break; + } + } + int32_t size = taosArrayGetSize(pSubmitArr); + if (size > 0) { + for (int32_t i = 1; i <= TSDB_RETENTION_L2; ++i) { + if (tdExecuteRSmaImpl(pSma, pSubmitArr->pData, size, STREAM_INPUT__MERGED_SUBMIT, pInfo, type, i) < 0) { + tdFreeRSmaSubmitItems(pSubmitArr); + goto _err; + } + } + tdFreeRSmaSubmitItems(pSubmitArr); + } return TSDB_CODE_SUCCESS; _err: + while (1) { + void *msg = NULL; + taosGetQitem(qall, (void **)&msg); + if (msg) { + taosFreeQitem(msg); + } else { + break; + } + } return TSDB_CODE_FAILED; } /** - * @brief fetch rsma data of level 2/3 and submit + * @brief * * @param pSma - * @param pMsg + * @param type * @return int32_t */ -int32_t smaProcessFetch(SSma *pSma, void *pMsg) { - SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg; - SRSmaFetchMsg req = {0}; - SDecoder decoder = {0}; - void *pBuf = NULL; - SRSmaInfo *pInfo = NULL; - SRSmaInfoItem *pItem = NULL; - - if (!pRpcMsg || pRpcMsg->contLen < sizeof(SMsgHead)) { - terrno = TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP; - goto _err; - } - pBuf = POINTER_SHIFT(pRpcMsg->pCont, sizeof(SMsgHead)); +int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) { + SVnode *pVnode = pSma->pVnode; + SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); + SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pEnv); + SHashObj *infoHash = NULL; + SArray *pSubmitArr = NULL; + bool isFetchAll = false; - tDecoderInit(&decoder, pBuf, pRpcMsg->contLen); - if (tDecodeSRSmaFetchMsg(&decoder, &req) < 0) { - terrno = TSDB_CODE_INVALID_MSG; + if (!pRSmaStat || !(infoHash = RSMA_INFO_HASH(pRSmaStat))) { + terrno = TSDB_CODE_RSMA_INVALID_STAT; goto _err; } - pInfo = tdAcquireRSmaInfoBySuid(pSma, req.suid); - if (!pInfo) { - if (terrno == TSDB_CODE_SUCCESS) { - terrno = TSDB_CODE_RSMA_EMPTY_INFO; - } - smaWarn("vgId:%d, failed to process rsma fetch msg for suid:%" PRIi64 " level:%" PRIi8 " since %s", SMA_VID(pSma), - req.suid, req.level, terrstr()); + if (!(pSubmitArr = + taosArrayInit(TMIN(RSMA_SUBMIT_BATCH_SIZE, atomic_load_64(&pRSmaStat->nBufItems)), POINTER_BYTES))) { + terrno = TSDB_CODE_OUT_OF_MEMORY; goto _err; } - pItem = RSMA_INFO_ITEM(pInfo, req.level - 1); + while (true) { + // step 1: rsma exec - consume data in buffer queue for all suids + if (type == RSMA_EXEC_OVERFLOW || type == RSMA_EXEC_COMMIT) { + void *pIter = NULL; + while ((pIter = taosHashIterate(infoHash, pIter))) { + SRSmaInfo *pInfo = *(SRSmaInfo **)pIter; + if (atomic_val_compare_exchange_8(&pInfo->assigned, 0, 1) == 0) { + if ((taosQueueItemSize(pInfo->queue) > 0) || RSMA_INFO_ITEM(pInfo, 0)->fetchLevel || + RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) { + int32_t batchCnt = -1; + int32_t batchMax = taosHashGetSize(infoHash) / tsNumOfVnodeRsmaThreads; + bool occupied = (batchMax <= 1); + if (batchMax > 1) { + batchMax = 100 / batchMax; + } + while (occupied || (++batchCnt < batchMax)) { // greedy mode + taosReadAllQitems(pInfo->queue, pInfo->qall); // queue has mutex lock + int32_t qallItemSize = taosQallItemSize(pInfo->qall); + if (qallItemSize > 0) { + tdRSmaBatchExec(pSma, pInfo, pInfo->qall, pSubmitArr, type); + smaDebug("vgId:%d, batchSize:%d, execType:%" PRIi8, SMA_VID(pSma), qallItemSize, type); + } + + int8_t oldStat = atomic_val_compare_exchange_8(RSMA_COMMIT_STAT(pRSmaStat), 0, 2); + if (oldStat == 0 || + ((oldStat == 2) && atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat)) < TASK_TRIGGER_STAT_PAUSED)) { + atomic_fetch_add_32(&pRSmaStat->nFetchAll, 1); + tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr); + if (0 == atomic_sub_fetch_32(&pRSmaStat->nFetchAll, 1)) { + atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 0); + } + } + + if (qallItemSize > 0) { + atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize); + continue; + } else if (RSMA_INFO_ITEM(pInfo, 0)->fetchLevel || RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) { + if (atomic_load_8(RSMA_COMMIT_STAT(pRSmaStat)) == 0) { + continue; + } + for (int32_t j = 0; j < TSDB_RETENTION_L2; ++j) { + SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, j); + if (pItem->fetchLevel) { + pItem->fetchLevel = 0; + taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId); + } + } + } + + break; + } + } + atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0); + } + } + if (type == RSMA_EXEC_COMMIT) { + if (atomic_load_64(&pRSmaStat->nBufItems) <= 0) { + break; + } else { + // commit should wait for all items be consumed + continue; + } + } + } +#if 0 + else if (type == RSMA_EXEC_COMMIT) { + while (pIter) { + SRSmaInfo *pInfo = *(SRSmaInfo **)pIter; + if (taosQueueItemSize(pInfo->iQueue)) { + if (atomic_val_compare_exchange_8(&pInfo->assigned, 0, 1) == 0) { + taosReadAllQitems(pInfo->iQueue, pInfo->iQall); // queue has mutex lock + int32_t qallItemSize = taosQallItemSize(pInfo->iQall); + if (qallItemSize > 0) { + atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize); + nIdle = 0; + + // batch exec + tdRSmaBatchExec(pSma, pInfo, pInfo->qall, pSubmitArr, type); + } + + // tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr); + atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0); + } + } + ASSERT(taosQueueItemSize(pInfo->iQueue) == 0); + pIter = taosHashIterate(infoHash, pIter); + } + break; + } +#endif + else { + ASSERT(0); + } - SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL}; - qTaskInfo_t taskInfo = RSMA_INFO_QTASK(pInfo, req.level - 1); - if ((terrno = qSetMultiStreamInput(taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK)) < 0) { - goto _err; - } - if (tdRSmaFetchAndSubmitResult(pSma, taskInfo, pItem, pInfo->pTSchema, pInfo->suid, STREAM_INPUT__DATA_BLOCK) < 0) { - goto _err; - } + if (atomic_load_64(&pRSmaStat->nBufItems) <= 0) { + if (pEnv->flag & SMA_ENV_FLG_CLOSE) { + break; + } - tdCleanupStreamInputDataBlock(taskInfo); + tsem_wait(&pRSmaStat->notEmpty); - tdReleaseRSmaInfo(pSma, pInfo); - tDecoderClear(&decoder); - smaDebug("vgId:%d, success to process rsma fetch msg for suid:%" PRIi64 " level:%" PRIi8, SMA_VID(pSma), req.suid, - req.level); + if ((pEnv->flag & SMA_ENV_FLG_CLOSE) && (atomic_load_64(&pRSmaStat->nBufItems) <= 0)) { + smaInfo("vgId:%d, exec task end, flag:%" PRIi8 ", nBufItems:%" PRIi64, SMA_VID(pSma), pEnv->flag, + atomic_load_64(&pRSmaStat->nBufItems)); + break; + } + } + + } // end of while(true) + +_end: + taosArrayDestroy(pSubmitArr); return TSDB_CODE_SUCCESS; _err: - tdReleaseRSmaInfo(pSma, pInfo); - tDecoderClear(&decoder); - smaError("vgId:%d, failed to process rsma fetch msg since %s", SMA_VID(pSma), terrstr()); + taosArrayDestroy(pSubmitArr); return TSDB_CODE_FAILED; } diff --git a/source/dnode/vnode/src/sma/smaSnapshot.c b/source/dnode/vnode/src/sma/smaSnapshot.c index fbcd2af7516e3f5bd96891a177af42213d78fefe..335c15a539ef31d66d83377f90da225e45ffd893 100644 --- a/source/dnode/vnode/src/sma/smaSnapshot.c +++ b/source/dnode/vnode/src/sma/smaSnapshot.c @@ -139,7 +139,6 @@ static int32_t rsmaSnapReadQTaskInfo(SRsmaSnapReader* pReader, uint8_t** ppBuf) smaInfo("vgId:%d, vnode snapshot rsma read qtaskinfo, size:%" PRIi64, SMA_VID(pSma), size); - SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppBuf); pHdr->type = SNAP_DATA_QTASK; pHdr->size = size; @@ -279,7 +278,8 @@ int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapWrit TdFilePtr qTaskF = taosCreateFile(qTaskInfoFullName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); if (!qTaskF) { code = TAOS_SYSTEM_ERROR(errno); - smaError("vgId:%d, rsma snapshot writer open %s failed since %s", TD_VID(pSma->pVnode), qTaskInfoFullName, tstrerror(code)); + smaError("vgId:%d, rsma snapshot writer open %s failed since %s", TD_VID(pSma->pVnode), qTaskInfoFullName, + tstrerror(code)); goto _err; } qWriter->pWriteH = qTaskF; @@ -309,7 +309,7 @@ int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback) { if (rollback) { // TODO: rsma1/rsma2 // qtaskinfo - if(pWriter->pQTaskFWriter) { + if (pWriter->pQTaskFWriter) { taosRemoveFile(pWriter->pQTaskFWriter->fname); } } else { diff --git a/source/dnode/vnode/src/sma/smaTimeRange.c b/source/dnode/vnode/src/sma/smaTimeRange.c index f46d9dc29c0435dedf901feae1fcc06e0b3fa3b7..1687cd46a07a7b0a70107eb825fb06b6f9314441 100644 --- a/source/dnode/vnode/src/sma/smaTimeRange.c +++ b/source/dnode/vnode/src/sma/smaTimeRange.c @@ -175,7 +175,7 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) { } tdRefSmaStat(pSma, pStat); - pTsmaStat = SMA_TSMA_STAT(pStat); + pTsmaStat = SMA_STAT_TSMA(pStat); if (!pTsmaStat->pTSma) { STSma *pTSma = metaGetSmaInfoByIndex(SMA_META(pSma), indexUid); @@ -201,9 +201,8 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) { } SBatchDeleteReq deleteReq; - SSubmitReq *pSubmitReq = - tdBlockToSubmit(pSma->pVnode, (const SArray *)msg, pTsmaStat->pTSchema, true, pTsmaStat->pTSma->dstTbUid, - pTsmaStat->pTSma->dstTbName, pTsmaStat->pTSma->dstVgId, &deleteReq); + SSubmitReq *pSubmitReq = tqBlockToSubmit(pSma->pVnode, (const SArray *)msg, pTsmaStat->pTSchema, true, + pTsmaStat->pTSma->dstTbUid, pTsmaStat->pTSma->dstTbName, &deleteReq); if (!pSubmitReq) { smaError("vgId:%d, failed to gen submit blk while tsma insert for smaIndex %" PRIi64 " since %s", SMA_VID(pSma), diff --git a/source/dnode/vnode/src/sma/smaUtil.c b/source/dnode/vnode/src/sma/smaUtil.c index d9f38ffd090fffcb6cf110265f1d2929714da669..d771797963a5cd9d242fea1f4d65a5634f12b5e8 100644 --- a/source/dnode/vnode/src/sma/smaUtil.c +++ b/source/dnode/vnode/src/sma/smaUtil.c @@ -350,49 +350,48 @@ _err: } /** - * @brief pTSchema is shared + * @brief Clone qTaskInfo of SRSmaInfo * * @param pSma - * @param pDest - * @param pSrc + * @param pInfo * @return int32_t */ -int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo **pDest, SRSmaInfo *pSrc) { - SVnode *pVnode = pSma->pVnode; +int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) { SRSmaParam *param = NULL; - if (!pSrc) { - *pDest = NULL; + if (!pInfo) { return TSDB_CODE_SUCCESS; } SMetaReader mr = {0}; metaReaderInit(&mr, SMA_META(pSma), 0); - smaDebug("vgId:%d, rsma clone, suid is %" PRIi64, TD_VID(pVnode), pSrc->suid); - if (metaGetTableEntryByUid(&mr, pSrc->suid) < 0) { - smaError("vgId:%d, rsma clone, failed to get table meta for %" PRIi64 " since %s", TD_VID(pVnode), pSrc->suid, + smaDebug("vgId:%d, rsma clone qTaskInfo for suid:%" PRIi64, SMA_VID(pSma), pInfo->suid); + if (metaGetTableEntryByUid(&mr, pInfo->suid) < 0) { + smaError("vgId:%d, rsma clone, failed to get table meta for %" PRIi64 " since %s", SMA_VID(pSma), pInfo->suid, terrstr()); goto _err; } ASSERT(mr.me.type == TSDB_SUPER_TABLE); - ASSERT(mr.me.uid == pSrc->suid); + ASSERT(mr.me.uid == pInfo->suid); if (TABLE_IS_ROLLUP(mr.me.flags)) { param = &mr.me.stbEntry.rsmaParam; for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) { - if (tdCloneQTaskInfo(pSma, pSrc->iTaskInfo[i], pSrc->taskInfo[i], param, pSrc->suid, i) < 0) { + if (!pInfo->iTaskInfo[i]) { + continue; + } + if (tdCloneQTaskInfo(pSma, pInfo->taskInfo[i], pInfo->iTaskInfo[i], param, pInfo->suid, i) < 0) { goto _err; } } - smaDebug("vgId:%d, rsma clone env success for %" PRIi64, TD_VID(pVnode), pSrc->suid); + smaDebug("vgId:%d, rsma clone env success for %" PRIi64, SMA_VID(pSma), pInfo->suid); + } else { + terrno = TSDB_CODE_RSMA_INVALID_SCHEMA; + goto _err; } metaReaderClear(&mr); - - *pDest = pSrc; // pointer copy - return TSDB_CODE_SUCCESS; _err: - *pDest = NULL; metaReaderClear(&mr); - smaError("vgId:%d, rsma clone env failed for %" PRIi64 " since %s", TD_VID(pVnode), pSrc->suid, terrstr()); + smaError("vgId:%d, rsma clone env failed for %" PRIi64 " since %s", SMA_VID(pSma), pInfo->suid, terrstr()); return TSDB_CODE_FAILED; } \ No newline at end of file diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 112543e340cee6ba6f558a145c27653e0f5e5c70..eed997b486f39a25eb09abc35158c48c5216f5af 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -60,11 +60,11 @@ STQ* tqOpen(const char* path, SVnode* pVnode) { pTq->path = strdup(path); pTq->pVnode = pVnode; - pTq->handles = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK); + pTq->pHandle = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK); - pTq->pushMgr = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); + pTq->pPushMgr = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); - pTq->pAlterInfo = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK); + pTq->pCheckInfo = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK); if (tqMetaOpen(pTq) < 0) { ASSERT(0); @@ -79,15 +79,19 @@ STQ* tqOpen(const char* path, SVnode* pVnode) { ASSERT(0); } + if (streamLoadTasks(pTq->pStreamMeta) < 0) { + ASSERT(0); + } + return pTq; } void tqClose(STQ* pTq) { if (pTq) { tqOffsetClose(pTq->pOffsetStore); - taosHashCleanup(pTq->handles); - taosHashCleanup(pTq->pushMgr); - taosHashCleanup(pTq->pAlterInfo); + taosHashCleanup(pTq->pHandle); + taosHashCleanup(pTq->pPushMgr); + taosHashCleanup(pTq->pCheckInfo); taosMemoryFree(pTq->path); tqMetaClose(pTq); streamMetaClose(pTq->pStreamMeta); @@ -96,7 +100,13 @@ void tqClose(STQ* pTq) { } int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqMetaRsp* pRsp) { - int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqMetaRsp(NULL, pRsp); + int32_t len = 0; + int32_t code = 0; + tEncodeSize(tEncodeSMqMetaRsp, pRsp, len, code); + if (code < 0) { + return -1; + } + int32_t tlen = sizeof(SMqRspHead) + len; void* buf = rpcMallocCont(tlen); if (buf == NULL) { return -1; @@ -107,7 +117,11 @@ int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, ((SMqRspHead*)buf)->consumerId = pReq->consumerId; void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead)); - tEncodeSMqMetaRsp(&abuf, pRsp); + + SEncoder encoder = {0}; + tEncoderInit(&encoder, abuf, len); + tEncodeSMqMetaRsp(&encoder, pRsp); + tEncoderClear(&encoder); SRpcMsg resp = { .info = pMsg->info, @@ -117,9 +131,8 @@ int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, }; tmsgSendRsp(&resp); - tqDebug("vgId:%d, from consumer:%" PRId64 ", (epoch %d) send rsp, res msg type %d, reqOffset:%" PRId64 - ", rspOffset:%" PRId64, - TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->resMsgType, pRsp->reqOffset, pRsp->rspOffset); + tqDebug("vgId:%d, from consumer:%" PRId64 ", (epoch %d) send rsp, res msg type %d, offset type:%d", + TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->resMsgType, pRsp->rspOffset.type); return 0; } @@ -183,7 +196,12 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con return 0; } -int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ver) { +static FORCE_INLINE bool tqOffsetLessOrEqual(const STqOffset* pLeft, const STqOffset* pRight) { + return pLeft->val.type == TMQ_OFFSET__LOG && pRight->val.type == TMQ_OFFSET__LOG && + pLeft->val.version <= pRight->val.version; +} + +int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) { STqOffset offset = {0}; SDecoder decoder; tDecoderInit(&decoder, msg, msgLen); @@ -193,25 +211,30 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ve } tDecoderClear(&decoder); - if (offset.val.type == TMQ_OFFSET__SNAPSHOT_DATA) { + if (offset.val.type == TMQ_OFFSET__SNAPSHOT_DATA || offset.val.type == TMQ_OFFSET__SNAPSHOT_META) { tqDebug("receive offset commit msg to %s on vgId:%d, offset(type:snapshot) uid:%" PRId64 ", ts:%" PRId64, offset.subKey, TD_VID(pTq->pVnode), offset.val.uid, offset.val.ts); } else if (offset.val.type == TMQ_OFFSET__LOG) { tqDebug("receive offset commit msg to %s on vgId:%d, offset(type:log) version:%" PRId64, offset.subKey, TD_VID(pTq->pVnode), offset.val.version); + if (offset.val.version + 1 == version) { + offset.val.version += 1; + } } else { ASSERT(0); } - /*STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, offset.subKey);*/ - /*if (pOffset != NULL) {*/ - /*if (pOffset->val.type == TMQ_OFFSET__LOG && pOffset->val.version < offset.val.version) {*/ + STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, offset.subKey); + if (pOffset != NULL && tqOffsetLessOrEqual(&offset, pOffset)) { + return 0; + } + if (tqOffsetWrite(pTq->pOffsetStore, &offset) < 0) { ASSERT(0); return -1; } if (offset.val.type == TMQ_OFFSET__LOG) { - STqHandle* pHandle = taosHashGet(pTq->handles, offset.subKey, strlen(offset.subKey)); + STqHandle* pHandle = taosHashGet(pTq->pHandle, offset.subKey, strlen(offset.subKey)); if (pHandle) { if (walRefVer(pHandle->pRef, offset.val.version) < 0) { ASSERT(0); @@ -220,6 +243,8 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ve } } + // rsp + /*}*/ /*}*/ @@ -229,15 +254,15 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ve int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId) { void* pIter = NULL; while (1) { - pIter = taosHashIterate(pTq->pAlterInfo, pIter); + pIter = taosHashIterate(pTq->pCheckInfo, pIter); if (pIter == NULL) break; - SCheckAlterInfo* pCheck = (SCheckAlterInfo*)pIter; + STqCheckInfo* pCheck = (STqCheckInfo*)pIter; if (pCheck->ntbUid == tbUid) { int32_t sz = taosArrayGetSize(pCheck->colIdList); for (int32_t i = 0; i < sz; i++) { int16_t forbidColId = *(int16_t*)taosArrayGet(pCheck->colIdList, i); if (forbidColId == colId) { - taosHashCancelIterate(pTq->pAlterInfo, pIter); + taosHashCancelIterate(pTq->pCheckInfo, pIter); return -1; } } @@ -281,7 +306,6 @@ static int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq, int8_t su int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { SMqPollReq* pReq = pMsg->pCont; int64_t consumerId = pReq->consumerId; - int64_t timeout = pReq->timeout; int32_t reqEpoch = pReq->epoch; int32_t code = 0; STqOffsetVal reqOffset = pReq->reqOffset; @@ -289,7 +313,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { SWalCkHead* pCkHead = NULL; // 1.find handle - STqHandle* pHandle = taosHashGet(pTq->handles, pReq->subKey, strlen(pReq->subKey)); + STqHandle* pHandle = taosHashGet(pTq->pHandle, pReq->subKey, strlen(pReq->subKey)); /*ASSERT(pHandle);*/ if (pHandle == NULL) { tqError("tmq poll: no consumer handle for consumer:%" PRId64 ", in vgId:%d, subkey %s", consumerId, @@ -333,12 +357,11 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { TD_VID(pTq->pVnode), formatBuf); } else { if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) { - if (pReq->useSnapshot && pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { - if (!pHandle->fetchMeta) { - tqOffsetResetToData(&fetchOffsetNew, 0, 0); + if (pReq->useSnapshot) { + if (pHandle->fetchMeta) { + tqOffsetResetToMeta(&fetchOffsetNew, 0); } else { - // reset to meta - ASSERT(0); + tqOffsetResetToData(&fetchOffsetNew, 0, 0); } } else { tqOffsetResetToLog(&fetchOffsetNew, walGetFirstVer(pTq->pVnode->pWal)); @@ -350,40 +373,50 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) { code = -1; } - goto OVER; + tDeleteSMqDataRsp(&dataRsp); + return code; } else if (reqOffset.type == TMQ_OFFSET__RESET_NONE) { tqError("tmq poll: subkey %s, no offset committed for consumer %" PRId64 " in vg %d, subkey %s, reset none failed", pHandle->subKey, consumerId, TD_VID(pTq->pVnode), pReq->subKey); terrno = TSDB_CODE_TQ_NO_COMMITTED_OFFSET; code = -1; - goto OVER; + tDeleteSMqDataRsp(&dataRsp); + return code; } } } - // 3.query - if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { - /*if (fetchOffsetNew.type == TMQ_OFFSET__LOG) {*/ - /*fetchOffsetNew.version++;*/ - /*}*/ - if (tqScan(pTq, pHandle, &dataRsp, &fetchOffsetNew) < 0) { - ASSERT(0); - code = -1; + if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN || fetchOffsetNew.type != TMQ_OFFSET__LOG) { + SMqMetaRsp metaRsp = {0}; + tqScan(pTq, pHandle, &dataRsp, &metaRsp, &fetchOffsetNew); + + if (metaRsp.metaRspLen > 0) { + if (tqSendMetaPollRsp(pTq, pMsg, pReq, &metaRsp) < 0) { + code = -1; + } + tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send meta offset type:%d,uid:%ld,version:%ld", consumerId, + pHandle->subKey, TD_VID(pTq->pVnode), metaRsp.rspOffset.type, metaRsp.rspOffset.uid, + metaRsp.rspOffset.version); + taosMemoryFree(metaRsp.metaRsp); goto OVER; } - if (dataRsp.blockNum == 0) { - // TODO add to async task pool - /*dataRsp.rspOffset.version--;*/ - } - if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) { - code = -1; + + if (dataRsp.blockNum > 0) { + if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) { + code = -1; + } + goto OVER; + } else { + fetchOffsetNew = dataRsp.rspOffset; } - goto OVER; + + tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send data blockNum:%d, offset type:%d,uid:%ld,version:%ld", + consumerId, pHandle->subKey, TD_VID(pTq->pVnode), dataRsp.blockNum, dataRsp.rspOffset.type, + dataRsp.rspOffset.uid, dataRsp.rspOffset.version); } - if (pHandle->execHandle.subType != TOPIC_SUB_TYPE__COLUMN) { - ASSERT(fetchOffsetNew.type == TMQ_OFFSET__LOG); + if (pHandle->execHandle.subType != TOPIC_SUB_TYPE__COLUMN && fetchOffsetNew.type == TMQ_OFFSET__LOG) { int64_t fetchVer = fetchOffsetNew.version + 1; pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048); if (pCkHead == NULL) { @@ -406,7 +439,6 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { // TODO add push mgr tqOffsetResetToLog(&dataRsp.rspOffset, fetchVer); - ASSERT(dataRsp.rspOffset.version >= dataRsp.reqOffset.version); if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) { code = -1; } @@ -421,15 +453,13 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { if (pHead->msgType == TDMT_VND_SUBMIT) { SSubmitReq* pCont = (SSubmitReq*)&pHead->body; - if (tqLogScanExec(pTq, &pHandle->execHandle, pCont, &dataRsp) < 0) { + if (tqLogScanExec(pTq, pHandle, pCont, &dataRsp) < 0) { /*ASSERT(0);*/ } // TODO batch optimization: // TODO continue scan until meeting batch requirement if (dataRsp.blockNum > 0 /* threshold */) { tqOffsetResetToLog(&dataRsp.rspOffset, fetchVer); - ASSERT(dataRsp.rspOffset.version >= dataRsp.reqOffset.version); - if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) { code = -1; } @@ -443,11 +473,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { ASSERT(IS_META_MSG(pHead->msgType)); tqDebug("fetch meta msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType); SMqMetaRsp metaRsp = {0}; - /*metaRsp.reqOffset = pReq->reqOffset.version;*/ - metaRsp.rspOffset = fetchVer; - /*metaRsp.rspOffsetNew.version = fetchVer;*/ - tqOffsetResetToLog(&metaRsp.reqOffsetNew, pReq->reqOffset.version); - tqOffsetResetToLog(&metaRsp.rspOffsetNew, fetchVer); + tqOffsetResetToLog(&metaRsp.rspOffset, fetchVer); metaRsp.resMsgType = pHead->msgType; metaRsp.metaRspLen = pHead->bodyLen; metaRsp.metaRsp = pHead->body; @@ -461,27 +487,21 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { } } -OVER: - if (pCkHead) taosMemoryFree(pCkHead); - // TODO wrap in destroy func - taosArrayDestroy(dataRsp.blockDataLen); - taosArrayDestroyP(dataRsp.blockData, (FDelete)taosMemoryFree); - - if (dataRsp.withSchema) { - taosArrayDestroyP(dataRsp.blockSchema, (FDelete)tDeleteSSchemaWrapper); - } - - if (dataRsp.withTbName) { - taosArrayDestroyP(dataRsp.blockTbName, (FDelete)taosMemoryFree); + // send empty to client + if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) { + code = -1; } +OVER: + if (pCkHead) taosMemoryFree(pCkHead); + tDeleteSMqDataRsp(&dataRsp); return code; } -int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) { +int32_t tqProcessVgDeleteReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) { SMqVDeleteReq* pReq = (SMqVDeleteReq*)msg; - int32_t code = taosHashRemove(pTq->handles, pReq->subKey, strlen(pReq->subKey)); + int32_t code = taosHashRemove(pTq->pHandle, pReq->subKey, strlen(pReq->subKey)); ASSERT(code == 0); tqOffsetDelete(pTq->pOffsetStore, pReq->subKey); @@ -492,27 +512,43 @@ int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) { return 0; } -int32_t tqProcessCheckAlterInfoReq(STQ* pTq, char* msg, int32_t msgLen) { - SCheckAlterInfo info = {0}; - SDecoder decoder; +int32_t tqProcessAddCheckInfoReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) { + STqCheckInfo info = {0}; + SDecoder decoder; tDecoderInit(&decoder, msg, msgLen); - if (tDecodeSCheckAlterInfo(&decoder, &info) < 0) { + if (tDecodeSTqCheckInfo(&decoder, &info) < 0) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } tDecoderClear(&decoder); - if (taosHashPut(pTq->pAlterInfo, info.topic, strlen(info.topic), &info, sizeof(SCheckAlterInfo)) < 0) { + if (taosHashPut(pTq->pCheckInfo, info.topic, strlen(info.topic), &info, sizeof(STqCheckInfo)) < 0) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + if (tqMetaSaveCheckInfo(pTq, info.topic, msg, msgLen) < 0) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } return 0; } -int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) { +int32_t tqProcessDelCheckInfoReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) { + if (taosHashRemove(pTq->pCheckInfo, msg, strlen(msg)) < 0) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + if (tqMetaDeleteCheckInfo(pTq, msg) < 0) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + return 0; +} + +int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) { SMqRebVgReq req = {0}; tDecodeSMqRebVgReq(msg, &req); // todo lock - STqHandle* pHandle = taosHashGet(pTq->handles, req.subKey, strlen(req.subKey)); + STqHandle* pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey)); if (pHandle == NULL) { if (req.oldConsumerId != -1) { tqError("vgId:%d, build new consumer handle %s for consumer %d, but old consumerId is %ld", req.vgId, req.subKey, @@ -529,6 +565,7 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) { pHandle->execHandle.subType = req.subType; pHandle->fetchMeta = req.withMeta; + // TODO version should be assigned and refed during preprocess SWalRef* pRef = walRefCommittedVer(pTq->pVnode->pWal); if (pRef == NULL) { @@ -538,36 +575,42 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) { int64_t ver = pRef->refVer; pHandle->pRef = pRef; + SReadHandle handle = { + .meta = pTq->pVnode->pMeta, + .vnode = pTq->pVnode, + .initTableReader = true, + .initTqReader = true, + .version = ver, + }; + pHandle->snapshotVer = ver; + if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { pHandle->execHandle.execCol.qmsg = req.qmsg; - pHandle->snapshotVer = ver; req.qmsg = NULL; - SReadHandle handle = { - .meta = pTq->pVnode->pMeta, - .vnode = pTq->pVnode, - .initTableReader = true, - .initTqReader = true, - .version = ver, - }; - pHandle->execHandle.execCol.task = + + pHandle->execHandle.task = qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, &pHandle->execHandle.numOfCols, &pHandle->execHandle.pSchemaWrapper); - ASSERT(pHandle->execHandle.execCol.task); + ASSERT(pHandle->execHandle.task); void* scanner = NULL; - qExtractStreamScanner(pHandle->execHandle.execCol.task, &scanner); + qExtractStreamScanner(pHandle->execHandle.task, &scanner); ASSERT(scanner); pHandle->execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner); ASSERT(pHandle->execHandle.pExecReader); } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) { pHandle->pWalReader = walOpenReader(pTq->pVnode->pWal, NULL); - pHandle->execHandle.pExecReader = tqOpenReader(pTq->pVnode); pHandle->execHandle.execDb.pFilterOutTbUid = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + buildSnapContext(handle.meta, handle.version, 0, pHandle->execHandle.subType, pHandle->fetchMeta, (SSnapContext **)(&handle.sContext)); + + pHandle->execHandle.task = + qCreateQueueExecTaskInfo(NULL, &handle, NULL, NULL); } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) { pHandle->pWalReader = walOpenReader(pTq->pVnode->pWal, NULL); pHandle->execHandle.execTb.suid = req.suid; + SArray* tbUidList = taosArrayInit(0, sizeof(int64_t)); vnodeGetCtbIdList(pTq->pVnode, req.suid, tbUidList); tqDebug("vgId:%d, tq try to get all ctb, suid:%" PRId64, pTq->pVnode->config.vgId, req.suid); @@ -578,8 +621,12 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) { pHandle->execHandle.pExecReader = tqOpenReader(pTq->pVnode); tqReaderSetTbUidList(pHandle->execHandle.pExecReader, tbUidList); taosArrayDestroy(tbUidList); + + buildSnapContext(handle.meta, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta, + (SSnapContext**)(&handle.sContext)); + pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, NULL, NULL); } - taosHashPut(pTq->handles, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle)); + taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle)); tqDebug("try to persist handle %s consumer %" PRId64, req.subKey, pHandle->consumerId); if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) { // TODO @@ -600,8 +647,6 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) { } int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) { - int32_t code = 0; - if (pTask->taskLevel == TASK_LEVEL__AGG) { ASSERT(taosArrayGetSize(pTask->childEpInfo) != 0); } @@ -612,8 +657,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) { pTask->outputQueue = streamQueueOpen(); if (pTask->inputQueue == NULL || pTask->outputQueue == NULL) { - code = -1; - goto FAIL; + return -1; } pTask->inputStatus = TASK_INPUT_STATUS__NORMAL; @@ -623,17 +667,28 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) { // expand executor if (pTask->taskLevel == TASK_LEVEL__SOURCE) { + pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask); + if (pTask->pState == NULL) { + return -1; + } + SReadHandle handle = { .meta = pTq->pVnode->pMeta, .vnode = pTq->pVnode, .initTqReader = 1, + .pStateBackend = pTask->pState, }; pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle); ASSERT(pTask->exec.executor); } else if (pTask->taskLevel == TASK_LEVEL__AGG) { + pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask); + if (pTask->pState == NULL) { + return -1; + } SReadHandle mgHandle = { .vnode = NULL, .numOfVgroups = (int32_t)taosArrayGetSize(pTask->childEpInfo), + .pStateBackend = pTask->pState, }; pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle); ASSERT(pTask->exec.executor); @@ -658,44 +713,14 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) { streamSetupTrigger(pTask); - tqInfo("deploy stream task on vg %d, task id %d, child id %d", TD_VID(pTq->pVnode), pTask->taskId, + tqInfo("expand stream task on vg %d, task id %d, child id %d", TD_VID(pTq->pVnode), pTask->taskId, pTask->selfChildId); - -FAIL: - if (pTask->inputQueue) streamQueueClose(pTask->inputQueue); - if (pTask->outputQueue) streamQueueClose(pTask->outputQueue); - // TODO free executor - return code; + return 0; } -int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen) { +int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) { // - return streamMetaAddSerializedTask(pTq->pStreamMeta, msg, msgLen); -#if 0 - SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask)); - if (pTask == NULL) { - return -1; - } - SDecoder decoder; - tDecoderInit(&decoder, (uint8_t*)msg, msgLen); - if (tDecodeSStreamTask(&decoder, pTask) < 0) { - ASSERT(0); - goto FAIL; - } - tDecoderClear(&decoder); - - if (tqExpandTask(pTq, pTask) < 0) { - goto FAIL; - } - - taosHashPut(pTq->pStreamTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*)); - - return 0; - -FAIL: - if (pTask) taosMemoryFree(pTask); - return -1; -#endif + return streamMetaAddSerializedTask(pTq->pStreamMeta, version, msg, msgLen); } int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* pReq, int64_t ver) { @@ -817,7 +842,7 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) { } } -int32_t tqProcessTaskDropReq(STQ* pTq, char* msg, int32_t msgLen) { +int32_t tqProcessTaskDropReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) { SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg; return streamMetaRemoveTask(pTq->pStreamMeta, pReq->taskId); diff --git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c index 435bbb77b8cab0b6c631f98e30444501ae8faf03..da596d07f9c63797d2cfc8a628d6767aea8b468b 100644 --- a/source/dnode/vnode/src/tq/tqExec.c +++ b/source/dnode/vnode/src/tq/tqExec.c @@ -60,18 +60,58 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp) { return 0; } -int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset) { +int64_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset) { const STqExecHandle* pExec = &pHandle->execHandle; - qTaskInfo_t task = pExec->execCol.task; + ASSERT(pExec->subType == TOPIC_SUB_TYPE__COLUMN); + + qTaskInfo_t task = pExec->task; + + if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) { + tqDebug("prepare scan failed, return"); + if (pOffset->type == TMQ_OFFSET__LOG) { + pRsp->rspOffset = *pOffset; + return 0; + } else { + tqOffsetResetToLog(pOffset, pHandle->snapshotVer); + if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) { + tqDebug("prepare scan failed, return"); + pRsp->rspOffset = *pOffset; + return 0; + } + } + } + + int32_t rowCnt = 0; + while (1) { + SSDataBlock* pDataBlock = NULL; + uint64_t ts = 0; + tqDebug("tmq task start to execute"); + if (qExecTask(task, &pDataBlock, &ts) < 0) { + ASSERT(0); + } + tqDebug("tmq task execute end, get %p", pDataBlock); - if (qStreamPrepareScan(task, pOffset) < 0) { + if (pDataBlock) { + tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols); + pRsp->blockNum++; + } + } + + return 0; +} + +int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* pOffset) { + const STqExecHandle* pExec = &pHandle->execHandle; + qTaskInfo_t task = pExec->task; + + if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) { tqDebug("prepare scan failed, return"); if (pOffset->type == TMQ_OFFSET__LOG) { pRsp->rspOffset = *pOffset; return 0; } else { tqOffsetResetToLog(pOffset, pHandle->snapshotVer); - if (qStreamPrepareScan(task, pOffset) < 0) { + if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) { tqDebug("prepare scan failed, return"); pRsp->rspOffset = *pOffset; return 0; @@ -83,24 +123,39 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVa while (1) { SSDataBlock* pDataBlock = NULL; uint64_t ts = 0; - tqDebug("task start to execute"); + tqDebug("tmqsnap task start to execute"); if (qExecTask(task, &pDataBlock, &ts) < 0) { ASSERT(0); } - tqDebug("task execute end, get %p", pDataBlock); + tqDebug("tmqsnap task execute end, get %p", pDataBlock); if (pDataBlock != NULL) { if (pRsp->withTbName) { + int64_t uid = 0; if (pOffset->type == TMQ_OFFSET__LOG) { - int64_t uid = pExec->pExecReader->msgIter.uid; + uid = pExec->pExecReader->msgIter.uid; if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) { continue; } } else { - pRsp->withTbName = 0; + char* tbName = strdup(qExtractTbnameFromTask(task)); + taosArrayPush(pRsp->blockTbName, &tbName); } } - tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols); + if (pRsp->withSchema) { + if (pOffset->type == TMQ_OFFSET__LOG) { + tqAddBlockSchemaToRsp(pExec, pRsp); + } else { + SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task)); + taosArrayPush(pRsp->blockSchema, &pSW); + } + } + + if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { + tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols); + } else { + tqAddBlockDataToRsp(pDataBlock, pRsp, taosArrayGetSize(pDataBlock->pDataBlock)); + } pRsp->blockNum++; if (pOffset->type == TMQ_OFFSET__LOG) { continue; @@ -110,93 +165,48 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVa } } - if (pRsp->blockNum == 0 && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) { - tqDebug("vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode), - pHandle->snapshotVer + 1); - tqOffsetResetToLog(pOffset, pHandle->snapshotVer); - qStreamPrepareScan(task, pOffset); - continue; - } - - void* meta = qStreamExtractMetaMsg(task); - if (meta != NULL) { - // tq add meta to rsp - } - - if (qStreamExtractOffset(task, &pRsp->rspOffset) < 0) { - ASSERT(0); - } - - ASSERT(pRsp->rspOffset.type != 0); + if (pHandle->execHandle.subType != TOPIC_SUB_TYPE__COLUMN) { + if (pDataBlock == NULL && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) { + if (qStreamExtractPrepareUid(task) != 0) { + continue; + } + tqDebug("tmqsnap vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode), + pHandle->snapshotVer + 1); + break; + } -#if 0 - if (pRsp->reqOffset.type == TMQ_OFFSET__LOG) { if (pRsp->blockNum > 0) { - ASSERT(pRsp->rspOffset.version > pRsp->reqOffset.version); - } else { - ASSERT(pRsp->rspOffset.version >= pRsp->reqOffset.version); + tqDebug("tmqsnap task exec exited, get data"); + break; + } + + SMqMetaRsp* tmp = qStreamExtractMetaMsg(task); + if (tmp->rspOffset.type == TMQ_OFFSET__SNAPSHOT_DATA) { + tqOffsetResetToData(pOffset, tmp->rspOffset.uid, tmp->rspOffset.ts); + qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType); + tmp->rspOffset.type = TMQ_OFFSET__SNAPSHOT_META; + tqDebug("tmqsnap task exec change to get data"); + continue; } + + *pMetaRsp = *tmp; + tqDebug("tmqsnap task exec exited, get meta"); } -#endif tqDebug("task exec exited"); break; } - return 0; -} - -#if 0 -int32_t tqScanSnapshot(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffsetVal offset, int32_t workerId) { - ASSERT(pExec->subType == TOPIC_SUB_TYPE__COLUMN); - qTaskInfo_t task = pExec->execCol.task[workerId]; - - if (qStreamPrepareTsdbScan(task, offset.uid, offset.ts) < 0) { + if (qStreamExtractOffset(task, &pRsp->rspOffset) < 0) { ASSERT(0); } - int32_t rowCnt = 0; - while (1) { - SSDataBlock* pDataBlock = NULL; - uint64_t ts = 0; - if (qExecTask(task, &pDataBlock, &ts) < 0) { - ASSERT(0); - } - if (pDataBlock == NULL) break; - - ASSERT(pDataBlock->info.rows != 0); - ASSERT(taosArrayGetSize(pDataBlock->pDataBlock) != 0); - - tqAddBlockDataToRsp(pDataBlock, pRsp); - - if (pRsp->withTbName) { - pRsp->withTbName = 0; -#if 0 - int64_t uid; - int64_t ts; - if (qGetStreamScanStatus(task, &uid, &ts) < 0) { - ASSERT(0); - } - tqAddTbNameToRsp(pTq, uid, pRsp); -#endif - } - pRsp->blockNum++; - - rowCnt += pDataBlock->info.rows; - if (rowCnt >= 4096) break; - } - int64_t uid; - int64_t ts; - if (qGetStreamScanStatus(task, &uid, &ts) < 0) { - ASSERT(0); - } - tqOffsetResetToData(&pRsp->rspOffset, uid, ts); - + ASSERT(pRsp->rspOffset.type != 0); return 0; } -#endif -int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataRsp* pRsp) { +int32_t tqLogScanExec(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, SMqDataRsp* pRsp) { + STqExecHandle* pExec = &pHandle->execHandle; ASSERT(pExec->subType != TOPIC_SUB_TYPE__COLUMN); if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { @@ -241,6 +251,28 @@ int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataR tqAddBlockSchemaToRsp(pExec, pRsp); pRsp->blockNum++; } +#if 0 + if (pHandle->fetchMeta && pRsp->blockNum) { + SSubmitMsgIter iter = {0}; + tInitSubmitMsgIter(pReq, &iter); + STaosxRsp* pXrsp = (STaosxRsp*)pRsp; + while (1) { + SSubmitBlk* pBlk = NULL; + if (tGetSubmitMsgNext(&iter, &pBlk) < 0) return -1; + if (pBlk->schemaLen > 0) { + if (pXrsp->createTableNum == 0) { + pXrsp->createTableLen = taosArrayInit(0, sizeof(int32_t)); + pXrsp->createTableReq = taosArrayInit(0, sizeof(void*)); + } + void* createReq = taosMemoryCalloc(1, pBlk->schemaLen); + memcpy(createReq, pBlk->data, pBlk->schemaLen); + taosArrayPush(pXrsp->createTableLen, &pBlk->schemaLen); + taosArrayPush(pXrsp->createTableReq, &createReq); + pXrsp->createTableNum++; + } + } + } +#endif } if (pRsp->blockNum == 0) { diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c index 5709ad7c85ff142bbe43cce6f5e70c6953d72459..a192d1f863819f560a2b7b3ce92fe15c603c0fc1 100644 --- a/source/dnode/vnode/src/tq/tqMeta.c +++ b/source/dnode/vnode/src/tq/tqMeta.c @@ -43,86 +43,116 @@ int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) { return 0; } -int32_t tqMetaRestoreHandle(STQ* pTq) { - TBC* pCur = NULL; - if (tdbTbcOpen(pTq->pExecStore, &pCur, NULL) < 0) { +int32_t tqMetaOpen(STQ* pTq) { + if (tdbOpen(pTq->path, 16 * 1024, 1, &pTq->pMetaDB) < 0) { ASSERT(0); return -1; } - void* pKey = NULL; - int kLen = 0; - void* pVal = NULL; - int vLen = 0; - SDecoder decoder; - - tdbTbcMoveToFirst(pCur); + if (tdbTbOpen("tq.db", -1, -1, NULL, pTq->pMetaDB, &pTq->pExecStore) < 0) { + ASSERT(0); + return -1; + } - while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) { - STqHandle handle; - tDecoderInit(&decoder, (uint8_t*)pVal, vLen); - tDecodeSTqHandle(&decoder, &handle); + if (tdbTbOpen("tq.check.db", -1, -1, NULL, pTq->pMetaDB, &pTq->pCheckStore) < 0) { + ASSERT(0); + return -1; + } - handle.pRef = walOpenRef(pTq->pVnode->pWal); - if (handle.pRef == NULL) { - ASSERT(0); - return -1; - } - walRefVer(handle.pRef, handle.snapshotVer); + if (tqMetaRestoreHandle(pTq) < 0) { + return -1; + } - if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { - SReadHandle reader = { - .meta = pTq->pVnode->pMeta, - .vnode = pTq->pVnode, - .initTableReader = true, - .initTqReader = true, - .version = handle.snapshotVer, - }; - - handle.execHandle.execCol.task = qCreateQueueExecTaskInfo( - handle.execHandle.execCol.qmsg, &reader, &handle.execHandle.numOfCols, &handle.execHandle.pSchemaWrapper); - ASSERT(handle.execHandle.execCol.task); - void* scanner = NULL; - qExtractStreamScanner(handle.execHandle.execCol.task, &scanner); - ASSERT(scanner); - handle.execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner); - ASSERT(handle.execHandle.pExecReader); - } else { - handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL); - handle.execHandle.execDb.pFilterOutTbUid = - taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); - } - tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle.subKey, handle.consumerId, TD_VID(pTq->pVnode)); - taosHashPut(pTq->handles, pKey, kLen, &handle, sizeof(STqHandle)); + if (tqMetaRestoreCheckInfo(pTq) < 0) { + return -1; } - tdbTbcClose(pCur); return 0; } -int32_t tqMetaOpen(STQ* pTq) { - if (tdbOpen(pTq->path, 16 * 1024, 1, &pTq->pMetaStore) < 0) { - ASSERT(0); +int32_t tqMetaClose(STQ* pTq) { + if (pTq->pExecStore) { + tdbTbClose(pTq->pExecStore); + } + if (pTq->pCheckStore) { + tdbTbClose(pTq->pCheckStore); + } + tdbClose(pTq->pMetaDB); + return 0; +} + +int32_t tqMetaSaveCheckInfo(STQ* pTq, const char* key, const void* value, int32_t vLen) { + TXN txn; + if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { return -1; } - if (tdbTbOpen("tq.db", -1, -1, NULL, pTq->pMetaStore, &pTq->pExecStore) < 0) { - ASSERT(0); + if (tdbBegin(pTq->pMetaDB, &txn) < 0) { return -1; } - if (tqMetaRestoreHandle(pTq) < 0) { + if (tdbTbUpsert(pTq->pExecStore, key, strlen(key), value, vLen, &txn) < 0) { + return -1; + } + + if (tdbCommit(pTq->pMetaDB, &txn) < 0) { return -1; } return 0; } -int32_t tqMetaClose(STQ* pTq) { - if (pTq->pExecStore) { - tdbTbClose(pTq->pExecStore); +int32_t tqMetaDeleteCheckInfo(STQ* pTq, const char* key) { + TXN txn; + + if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { + ASSERT(0); + } + + if (tdbBegin(pTq->pMetaDB, &txn) < 0) { + ASSERT(0); + } + + if (tdbTbDelete(pTq->pCheckStore, key, (int)strlen(key), &txn) < 0) { + /*ASSERT(0);*/ + } + + if (tdbCommit(pTq->pMetaDB, &txn) < 0) { + ASSERT(0); + } + + return 0; +} + +int32_t tqMetaRestoreCheckInfo(STQ* pTq) { + TBC* pCur = NULL; + if (tdbTbcOpen(pTq->pCheckStore, &pCur, NULL) < 0) { + ASSERT(0); + return -1; + } + + void* pKey = NULL; + int kLen = 0; + void* pVal = NULL; + int vLen = 0; + SDecoder decoder; + + tdbTbcMoveToFirst(pCur); + + while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) { + STqCheckInfo info; + tDecoderInit(&decoder, (uint8_t*)pVal, vLen); + if (tDecodeSTqCheckInfo(&decoder, &info) < 0) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + tDecoderClear(&decoder); + if (taosHashPut(pTq->pCheckInfo, info.topic, strlen(info.topic), &info, sizeof(STqCheckInfo)) < 0) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } } - tdbClose(pTq->pMetaStore); + tdbTbcClose(pCur); return 0; } @@ -153,7 +183,7 @@ int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) { ASSERT(0); } - if (tdbBegin(pTq->pMetaStore, &txn) < 0) { + if (tdbBegin(pTq->pMetaDB, &txn) < 0) { ASSERT(0); } @@ -161,7 +191,7 @@ int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) { ASSERT(0); } - if (tdbCommit(pTq->pMetaStore, &txn) < 0) { + if (tdbCommit(pTq->pMetaDB, &txn) < 0) { ASSERT(0); } @@ -177,7 +207,7 @@ int32_t tqMetaDeleteHandle(STQ* pTq, const char* key) { ASSERT(0); } - if (tdbBegin(pTq->pMetaStore, &txn) < 0) { + if (tdbBegin(pTq->pMetaDB, &txn) < 0) { ASSERT(0); } @@ -185,9 +215,74 @@ int32_t tqMetaDeleteHandle(STQ* pTq, const char* key) { /*ASSERT(0);*/ } - if (tdbCommit(pTq->pMetaStore, &txn) < 0) { + if (tdbCommit(pTq->pMetaDB, &txn) < 0) { ASSERT(0); } return 0; } + +int32_t tqMetaRestoreHandle(STQ* pTq) { + TBC* pCur = NULL; + if (tdbTbcOpen(pTq->pExecStore, &pCur, NULL) < 0) { + ASSERT(0); + return -1; + } + + void* pKey = NULL; + int kLen = 0; + void* pVal = NULL; + int vLen = 0; + SDecoder decoder; + + tdbTbcMoveToFirst(pCur); + + while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) { + STqHandle handle; + tDecoderInit(&decoder, (uint8_t*)pVal, vLen); + tDecodeSTqHandle(&decoder, &handle); + + handle.pRef = walOpenRef(pTq->pVnode->pWal); + if (handle.pRef == NULL) { + ASSERT(0); + return -1; + } + walRefVer(handle.pRef, handle.snapshotVer); + + SReadHandle reader = { + .meta = pTq->pVnode->pMeta, + .vnode = pTq->pVnode, + .initTableReader = true, + .initTqReader = true, + .version = handle.snapshotVer, + }; + + if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { + + handle.execHandle.task = qCreateQueueExecTaskInfo( + handle.execHandle.execCol.qmsg, &reader, &handle.execHandle.numOfCols, &handle.execHandle.pSchemaWrapper); + ASSERT(handle.execHandle.task); + void* scanner = NULL; + qExtractStreamScanner(handle.execHandle.task, &scanner); + ASSERT(scanner); + handle.execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner); + ASSERT(handle.execHandle.pExecReader); + } else { + + handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL); + handle.execHandle.execDb.pFilterOutTbUid = + taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); +// handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode); + buildSnapContext(reader.meta, reader.version, 0, handle.execHandle.subType, handle.fetchMeta, (SSnapContext **)(&reader.sContext)); + + handle.execHandle.task = + qCreateQueueExecTaskInfo(NULL, &reader, NULL, NULL); + } + tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle.subKey, handle.consumerId, TD_VID(pTq->pVnode)); + taosHashPut(pTq->pHandle, pKey, kLen, &handle, sizeof(STqHandle)); + } + + tdbTbcClose(pCur); + return 0; +} + diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c index ae3fef9b4b7ebf02654e93e09b5bf6c52f6e2354..ed7fa80c476fff2d6436232b0e610f0b6f61f1cd 100644 --- a/source/dnode/vnode/src/tq/tqPush.c +++ b/source/dnode/vnode/src/tq/tqPush.c @@ -14,6 +14,7 @@ */ #include "tq.h" +#include "vnd.h" #if 0 void tqTmrRspFunc(void* param, void* tmrId) { @@ -212,9 +213,7 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_ #endif int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) { - walApplyVer(pTq->pVnode->pWal, ver); - - if (msgType == TDMT_VND_SUBMIT) { + if (vnodeIsRoleLeader(pTq->pVnode) && msgType == TDMT_VND_SUBMIT) { if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) return 0; void* data = taosMemoryMalloc(msgLen); diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 5d7814a045733994b823114dbd4dce6de151cb28..6e2a6fdb7126f5e81bcb64ab6e10af64725c75ff 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -68,7 +68,7 @@ int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea offset++; } } -END: + END: taosThreadMutexUnlock(&pHandle->pWalReader->mutex); return code; } @@ -341,7 +341,7 @@ FAIL: return -1; } -void tqReaderSetColIdList(STqReader* pReadHandle, SArray* pColIdList) { pReadHandle->pColIdList = pColIdList; } +void tqReaderSetColIdList(STqReader* pReader, SArray* pColIdList) { pReader->pColIdList = pColIdList; } int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) { if (pReader->tbIdHash) { @@ -394,11 +394,11 @@ int tqReaderRemoveTbUidList(STqReader* pReader, const SArray* tbUidList) { int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { void* pIter = NULL; while (1) { - pIter = taosHashIterate(pTq->handles, pIter); + pIter = taosHashIterate(pTq->pHandle, pIter); if (pIter == NULL) break; STqHandle* pExec = (STqHandle*)pIter; if (pExec->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { - int32_t code = qUpdateQualifiedTableId(pExec->execHandle.execCol.task, tbUidList, isAdd); + int32_t code = qUpdateQualifiedTableId(pExec->execHandle.task, tbUidList, isAdd); ASSERT(code == 0); } else if (pExec->execHandle.subType == TOPIC_SUB_TYPE__DB) { if (!isAdd) { diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 42fb5c329db4ca2ccbda82eab1f1e353e9968e3b..522bf46aa1fb9d28225f3118b9b6e1bed7a6cd75 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -17,7 +17,7 @@ #include "tmsg.h" #include "tq.h" -int32_t tdBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBlock* pDataBlock, +int32_t tqBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq) { ASSERT(pDataBlock->info.type == STREAM_DELETE_RESULT); int32_t totRow = pDataBlock->info.rows; @@ -25,8 +25,7 @@ int32_t tdBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBl SColumnInfoData* pGidCol = taosArrayGet(pDataBlock->pDataBlock, GROUPID_COLUMN_INDEX); for (int32_t row = 0; row < totRow; row++) { int64_t ts = *(int64_t*)colDataGetData(pTsCol, row); - /*int64_t groupId = *(int64_t*)colDataGetData(pGidCol, row);*/ - int64_t groupId = 0; + int64_t groupId = *(int64_t*)colDataGetData(pGidCol, row); char* name = buildCtbNameByGroupId(stbFullName, groupId); tqDebug("stream delete msg: groupId :%ld, name: %s", groupId, name); SMetaReader mr = {0}; @@ -49,8 +48,8 @@ int32_t tdBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBl return 0; } -SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pTSchema, bool createTb, - int64_t suid, const char* stbFullName, int32_t vgId, SBatchDeleteReq* pDeleteReq) { +SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pTSchema, bool createTb, + int64_t suid, const char* stbFullName, SBatchDeleteReq* pDeleteReq) { SSubmitReq* ret = NULL; SArray* schemaReqs = NULL; SArray* schemaReqSz = NULL; @@ -69,9 +68,10 @@ SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); if (pDataBlock->info.type == STREAM_DELETE_RESULT) { int32_t padding1 = 0; - void* padding2 = taosMemoryMalloc(1); + void* padding2 = NULL; taosArrayPush(schemaReqSz, &padding1); taosArrayPush(schemaReqs, &padding2); + continue; } STagVal tagVal = { @@ -139,8 +139,7 @@ SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem continue; } int32_t rows = pDataBlock->info.rows; - // TODO min - int32_t rowSize = pDataBlock->info.rowSize; + /*int32_t rowSize = pDataBlock->info.rowSize;*/ int32_t maxLen = TD_ROW_MAX_BYTES_FROM_SCHEMA(pTSchema); int32_t schemaLen = 0; @@ -151,9 +150,8 @@ SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem } // assign data - // TODO ret = rpcMallocCont(cap); - ret->header.vgId = vgId; + ret->header.vgId = pVnode->config.vgId; ret->length = sizeof(SSubmitReq); ret->numOfBlocks = htonl(sz); @@ -162,13 +160,12 @@ SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); if (pDataBlock->info.type == STREAM_DELETE_RESULT) { pDeleteReq->suid = suid; - tdBuildDeleteReq(pVnode, stbFullName, pDataBlock, pDeleteReq); + tqBuildDeleteReq(pVnode, stbFullName, pDataBlock, pDeleteReq); continue; } blkHead->numOfRows = htonl(pDataBlock->info.rows); blkHead->sversion = htonl(pTSchema->version); - // TODO blkHead->suid = htobe64(suid); // uid is assigned by vnode blkHead->uid = 0; @@ -234,34 +231,35 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) { ASSERT(pTask->tbSink.pTSchema); deleteReq.deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq)); - SSubmitReq* pReq = tdBlockToSubmit(pVnode, pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid, - pTask->tbSink.stbFullName, pVnode->config.vgId, &deleteReq); + SSubmitReq* submitReq = tqBlockToSubmit(pVnode, pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid, + pTask->tbSink.stbFullName, &deleteReq); tqDebug("vgId:%d, task %d convert blocks over, put into write-queue", TD_VID(pVnode), pTask->taskId); - int32_t code; - int32_t len; - tEncodeSize(tEncodeSBatchDeleteReq, &deleteReq, len, code); - if (code < 0) { - // - ASSERT(0); - } - SEncoder encoder; - void* buf = rpcMallocCont(len + sizeof(SMsgHead)); - void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); - tEncoderInit(&encoder, abuf, len); - tEncodeSBatchDeleteReq(&encoder, &deleteReq); - tEncoderClear(&encoder); + if (taosArrayGetSize(deleteReq.deleteReqs) != 0) { + int32_t code; + int32_t len; + tEncodeSize(tEncodeSBatchDeleteReq, &deleteReq, len, code); + if (code < 0) { + // + ASSERT(0); + } + SEncoder encoder; + void* serializedDeleteReq = rpcMallocCont(len + sizeof(SMsgHead)); + void* abuf = POINTER_SHIFT(serializedDeleteReq, sizeof(SMsgHead)); + tEncoderInit(&encoder, abuf, len); + tEncodeSBatchDeleteReq(&encoder, &deleteReq); + tEncoderClear(&encoder); - ((SMsgHead*)buf)->vgId = pVnode->config.vgId; + ((SMsgHead*)serializedDeleteReq)->vgId = pVnode->config.vgId; - if (taosArrayGetSize(deleteReq.deleteReqs) != 0) { SRpcMsg msg = { .msgType = TDMT_VND_BATCH_DEL, - .pCont = buf, + .pCont = serializedDeleteReq, .contLen = len + sizeof(SMsgHead), }; if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) { + rpcFreeCont(serializedDeleteReq); tqDebug("failed to put into write-queue since %s", terrstr()); } } @@ -271,11 +269,12 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) { // build write msg SRpcMsg msg = { .msgType = TDMT_VND_SUBMIT, - .pCont = pReq, - .contLen = ntohl(pReq->length), + .pCont = submitReq, + .contLen = ntohl(submitReq->length), }; if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) { + rpcFreeCont(submitReq); tqDebug("failed to put into write-queue since %s", terrstr()); } } diff --git a/source/dnode/vnode/src/tq/tqSnapshot.c b/source/dnode/vnode/src/tq/tqSnapshot.c index b4a7ce7737e5f599dac051f33e275918e7709c0d..c52e0e2c098478c0fc055306ad2eda9ced09dfea 100644 --- a/source/dnode/vnode/src/tq/tqSnapshot.c +++ b/source/dnode/vnode/src/tq/tqSnapshot.c @@ -165,9 +165,9 @@ int32_t tqSnapWriterClose(STqSnapWriter** ppWriter, int8_t rollback) { STQ* pTq = pWriter->pTq; if (rollback) { - ASSERT(0); + tdbAbort(pWriter->pTq->pMetaDB, &pWriter->txn); } else { - code = tdbCommit(pWriter->pTq->pMetaStore, &pWriter->txn); + code = tdbCommit(pWriter->pTq->pMetaDB, &pWriter->txn); if (code) goto _err; } diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index f03b02af27bcef4a0eed50d49ae597d20ba29f18..61c68775559ebcbaca1853fdadecaaa1456c170c 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -33,16 +33,21 @@ int32_t tsdbOpenCache(STsdb *pTsdb) { taosLRUCacheSetStrictCapacity(pCache, true); + taosThreadMutexInit(&pTsdb->lruMutex, NULL); + _err: pTsdb->lruCache = pCache; return code; } -void tsdbCloseCache(SLRUCache *pCache) { +void tsdbCloseCache(STsdb *pTsdb) { + SLRUCache *pCache = pTsdb->lruCache; if (pCache) { taosLRUCacheEraseUnrefEntries(pCache); taosLRUCacheCleanup(pCache); + + taosThreadMutexDestroy(&pTsdb->lruMutex); } } @@ -261,14 +266,14 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb } for (++iCol; iCol < nCol; ++iCol) { - SLastCol *tTsVal = (SLastCol *)taosArrayGet(pLast, iCol); - if (keyTs >= tTsVal->ts) { - SColVal *tColVal = &tTsVal->colVal; + SLastCol *tTsVal1 = (SLastCol *)taosArrayGet(pLast, iCol); + if (keyTs >= tTsVal1->ts) { + SColVal *tColVal = &tTsVal1->colVal; SColVal colVal = {0}; tTSRowGetVal(row, pTSchema, iCol, &colVal); if (colVal.isNone || colVal.isNull) { - if (keyTs == tTsVal->ts && !tColVal->isNone && !tColVal->isNull) { + if (keyTs == tTsVal1->ts && !tColVal->isNone && !tColVal->isNull) { invalidate = true; break; @@ -279,6 +284,7 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb } } + _invalidate: taosMemoryFreeClear(pTSchema); taosLRUCacheRelease(pCache, h, invalidate); @@ -317,7 +323,7 @@ static int32_t getTableDelDataFromDelIdx(SDelFReader *pDelReader, SDelIdx *pDelI int32_t code = 0; if (pDelIdx) { - code = tsdbReadDelData(pDelReader, pDelIdx, aDelData, NULL); + code = tsdbReadDelData(pDelReader, pDelIdx, aDelData); } return code; @@ -388,8 +394,7 @@ static int32_t getTableDelIdx(SDelFReader *pDelFReader, tb_uid_t suid, tb_uid_t SDelIdx idx = {.suid = suid, .uid = uid}; // tMapDataReset(&delIdxMap); - // code = tsdbReadDelIdx(pDelFReader, &delIdxMap, NULL); - code = tsdbReadDelIdx(pDelFReader, pDelIdxArray, NULL); + code = tsdbReadDelIdx(pDelFReader, pDelIdxArray); if (code) goto _err; // code = tMapDataSearch(&delIdxMap, &idx, tGetDelIdx, tCmprDelIdx, pDelIdx); @@ -405,6 +410,182 @@ _err: return code; } +typedef enum { + SFSLASTNEXTROW_FS, + SFSLASTNEXTROW_FILESET, + SFSLASTNEXTROW_BLOCKDATA, + SFSLASTNEXTROW_BLOCKROW +} SFSLASTNEXTROWSTATES; + +typedef struct { + SFSLASTNEXTROWSTATES state; // [input] + STsdb *pTsdb; // [input] + SBlockIdx *pBlockIdxExp; // [input] + STSchema *pTSchema; // [input] + tb_uid_t suid; + tb_uid_t uid; + int32_t nFileSet; + int32_t iFileSet; + SArray *aDFileSet; + SDataFReader *pDataFReader; + SArray *aBlockL; + SBlockL *pBlockL; + SBlockData *pBlockDataL; + SBlockData blockDataL; + int32_t nRow; + int32_t iRow; + TSDBROW row; + /* + SArray *aBlockIdx; + SBlockIdx *pBlockIdx; + SMapData blockMap; + int32_t nBlock; + int32_t iBlock; + SBlock block; + */ +} SFSLastNextRowIter; + +static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) { + SFSLastNextRowIter *state = (SFSLastNextRowIter *)iter; + int32_t code = 0; + + switch (state->state) { + case SFSLASTNEXTROW_FS: + // state->aDFileSet = state->pTsdb->pFS->cState->aDFileSet; + state->nFileSet = taosArrayGetSize(state->aDFileSet); + state->iFileSet = state->nFileSet; + + state->pBlockDataL = NULL; + + case SFSLASTNEXTROW_FILESET: { + SDFileSet *pFileSet = NULL; + _next_fileset: + if (--state->iFileSet >= 0) { + pFileSet = (SDFileSet *)taosArrayGet(state->aDFileSet, state->iFileSet); + } else { + if (state->pBlockDataL) { + tBlockDataDestroy(state->pBlockDataL, 1); + state->pBlockDataL = NULL; + } + + *ppRow = NULL; + return code; + } + + code = tsdbDataFReaderOpen(&state->pDataFReader, state->pTsdb, pFileSet); + if (code) goto _err; + + if (!state->aBlockL) { + state->aBlockL = taosArrayInit(0, sizeof(SBlockL)); + } else { + taosArrayClear(state->aBlockL); + } + + code = tsdbReadBlockL(state->pDataFReader, state->aBlockL); + if (code) goto _err; + + // SBlockL *pBlockL = (SBlockL *)taosArrayGet(state->aBlockL, state->iBlockL); + + state->pBlockL = taosArraySearch(state->aBlockL, state->pBlockIdxExp, tCmprBlockL, TD_EQ); + if (!state->pBlockL) { + goto _next_fileset; + } + + int64_t suid = state->pBlockL->suid; + int64_t uid = state->pBlockL->maxUid; + + if (!state->pBlockDataL) { + state->pBlockDataL = &state->blockDataL; + + tBlockDataCreate(state->pBlockDataL); + } + code = tBlockDataInit(state->pBlockDataL, suid, suid ? 0 : uid, state->pTSchema); + if (code) goto _err; + } + case SFSLASTNEXTROW_BLOCKDATA: + code = tsdbReadLastBlock(state->pDataFReader, state->pBlockL, state->pBlockDataL); + if (code) goto _err; + + state->nRow = state->blockDataL.nRow; + state->iRow = state->nRow - 1; + + if (!state->pBlockDataL->uid) { + while (state->pBlockIdxExp->uid != state->pBlockDataL->aUid[state->iRow]) { + --state->iRow; + } + } + + state->state = SFSLASTNEXTROW_BLOCKROW; + case SFSLASTNEXTROW_BLOCKROW: + if (state->pBlockDataL->uid) { + if (state->iRow >= 0) { + state->row = tsdbRowFromBlockData(state->pBlockDataL, state->iRow); + *ppRow = &state->row; + + if (--state->iRow < 0) { + state->state = SFSLASTNEXTROW_FILESET; + } + } + } else { + if (state->iRow >= 0 && state->pBlockIdxExp->uid == state->pBlockDataL->aUid[state->iRow]) { + state->row = tsdbRowFromBlockData(state->pBlockDataL, state->iRow); + *ppRow = &state->row; + + if (--state->iRow < 0 || state->pBlockIdxExp->uid != state->pBlockDataL->aUid[state->iRow]) { + state->state = SFSLASTNEXTROW_FILESET; + } + } + } + + return code; + default: + ASSERT(0); + break; + } + +_err: + if (state->pDataFReader) { + tsdbDataFReaderClose(&state->pDataFReader); + state->pDataFReader = NULL; + } + if (state->aBlockL) { + taosArrayDestroy(state->aBlockL); + state->aBlockL = NULL; + } + if (state->pBlockDataL) { + tBlockDataDestroy(state->pBlockDataL, 1); + state->pBlockDataL = NULL; + } + + *ppRow = NULL; + + return code; +} + +int32_t clearNextRowFromFSLast(void *iter) { + SFSLastNextRowIter *state = (SFSLastNextRowIter *)iter; + int32_t code = 0; + + if (!state) { + return code; + } + + if (state->pDataFReader) { + tsdbDataFReaderClose(&state->pDataFReader); + state->pDataFReader = NULL; + } + if (state->aBlockL) { + taosArrayDestroy(state->aBlockL); + state->aBlockL = NULL; + } + if (state->pBlockDataL) { + tBlockDataDestroy(state->pBlockDataL, 1); + state->pBlockDataL = NULL; + } + + return code; +} + typedef enum SFSNEXTROWSTATES { SFSNEXTROW_FS, SFSNEXTROW_FILESET, @@ -416,6 +597,9 @@ typedef struct SFSNextRowIter { SFSNEXTROWSTATES state; // [input] STsdb *pTsdb; // [input] SBlockIdx *pBlockIdxExp; // [input] + STSchema *pTSchema; // [input] + tb_uid_t suid; + tb_uid_t uid; int32_t nFileSet; int32_t iFileSet; SArray *aDFileSet; @@ -451,9 +635,9 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { if (--state->iFileSet >= 0) { pFileSet = (SDFileSet *)taosArrayGet(state->aDFileSet, state->iFileSet); } else { - // tBlockDataClear(&state->blockData, 1); + // tBlockDataDestroy(&state->blockData, 1); if (state->pBlockData) { - tBlockDataClear(state->pBlockData, 1); + tBlockDataDestroy(state->pBlockData, 1); state->pBlockData = NULL; } @@ -465,13 +649,12 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { if (code) goto _err; // tMapDataReset(&state->blockIdxMap); - // code = tsdbReadBlockIdx(state->pDataFReader, &state->blockIdxMap, NULL); if (!state->aBlockIdx) { state->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx)); } else { taosArrayClear(state->aBlockIdx); } - code = tsdbReadBlockIdx(state->pDataFReader, state->aBlockIdx, NULL); + code = tsdbReadBlockIdx(state->pDataFReader, state->aBlockIdx); if (code) goto _err; /* if (state->pBlockIdx) { */ @@ -487,8 +670,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { } tMapDataReset(&state->blockMap); - code = tsdbReadBlock(state->pDataFReader, state->pBlockIdx, &state->blockMap, NULL); - /* code = tsdbReadBlock(state->pDataFReader, &state->blockIdx, &state->blockMap, NULL); */ + code = tsdbReadBlock(state->pDataFReader, state->pBlockIdx, &state->blockMap); if (code) goto _err; state->nBlock = state->blockMap.nItem; @@ -497,7 +679,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { if (!state->pBlockData) { state->pBlockData = &state->blockData; - tBlockDataInit(&state->blockData); + tBlockDataCreate(&state->blockData); } } case SFSNEXTROW_BLOCKDATA: @@ -510,7 +692,11 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { tMapDataGetItemByIdx(&state->blockMap, state->iBlock, &block, tGetBlock); /* code = tsdbReadBlockData(state->pDataFReader, &state->blockIdx, &block, &state->blockData, NULL, NULL); */ - code = tsdbReadBlockData(state->pDataFReader, state->pBlockIdx, &block, state->pBlockData, NULL, NULL); + tBlockDataReset(state->pBlockData); + code = tBlockDataInit(state->pBlockData, state->suid, state->uid, state->pTSchema); + if (code) goto _err; + + code = tsdbReadDataBlock(state->pDataFReader, &block, state->pBlockData); if (code) goto _err; state->nRow = state->blockData.nRow; @@ -555,8 +741,8 @@ _err: state->aBlockIdx = NULL; } if (state->pBlockData) { - // tBlockDataClear(&state->blockData, 1); - tBlockDataClear(state->pBlockData, 1); + // tBlockDataDestroy(&state->blockData, 1); + tBlockDataDestroy(state->pBlockData, 1); state->pBlockData = NULL; } @@ -582,8 +768,8 @@ int32_t clearNextRowFromFS(void *iter) { state->aBlockIdx = NULL; } if (state->pBlockData) { - // tBlockDataClear(&state->blockData, 1); - tBlockDataClear(state->pBlockData, 1); + // tBlockDataDestroy(&state->blockData, 1); + tBlockDataDestroy(state->pBlockData, 1); state->pBlockData = NULL; } @@ -725,18 +911,19 @@ typedef struct { SArray *pSkyline; int64_t iSkyline; - SBlockIdx idx; - SMemNextRowIter memState; - SMemNextRowIter imemState; - SFSNextRowIter fsState; - TSDBROW memRow, imemRow, fsRow; + SBlockIdx idx; + SMemNextRowIter memState; + SMemNextRowIter imemState; + SFSLastNextRowIter fsLastState; + SFSNextRowIter fsState; + TSDBROW memRow, imemRow, fsLastRow, fsRow; - TsdbNextRowState input[3]; + TsdbNextRowState input[4]; STsdbReadSnap *pReadSnap; STsdb *pTsdb; } CacheNextRowIter; -static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTsdb) { +static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTsdb, STSchema *pTSchema) { int code = 0; tb_uid_t suid = getTableSuidByUid(uid, pTsdb); @@ -745,12 +932,12 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs STbData *pMem = NULL; if (pIter->pReadSnap->pMem) { - tsdbGetTbDataFromMemTable(pIter->pReadSnap->pMem, suid, uid, &pMem); + pMem = tsdbGetTbDataFromMemTable(pIter->pReadSnap->pMem, suid, uid); } STbData *pIMem = NULL; if (pIter->pReadSnap->pIMem) { - tsdbGetTbDataFromMemTable(pIter->pReadSnap->pIMem, suid, uid, &pIMem); + pIMem = tsdbGetTbDataFromMemTable(pIter->pReadSnap->pIMem, suid, uid); } pIter->pTsdb = pTsdb; @@ -763,7 +950,7 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs if (pDelFile) { SDelFReader *pDelFReader; - code = tsdbDelFReaderOpen(&pDelFReader, pDelFile, pTsdb, NULL); + code = tsdbDelFReaderOpen(&pDelFReader, pDelFile, pTsdb); if (code) goto _err; code = getTableDelIdx(pDelFReader, suid, uid, &delIdx); @@ -782,14 +969,27 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs pIter->idx = (SBlockIdx){.suid = suid, .uid = uid}; + pIter->fsLastState.state = (SFSLASTNEXTROWSTATES)SFSNEXTROW_FS; + pIter->fsLastState.pTsdb = pTsdb; + pIter->fsLastState.aDFileSet = pIter->pReadSnap->fs.aDFileSet; + pIter->fsLastState.pBlockIdxExp = &pIter->idx; + pIter->fsLastState.pTSchema = pTSchema; + pIter->fsLastState.suid = suid; + pIter->fsLastState.uid = uid; + pIter->fsState.state = SFSNEXTROW_FS; pIter->fsState.pTsdb = pTsdb; pIter->fsState.aDFileSet = pIter->pReadSnap->fs.aDFileSet; pIter->fsState.pBlockIdxExp = &pIter->idx; + pIter->fsState.pTSchema = pTSchema; + pIter->fsState.suid = suid; + pIter->fsState.uid = uid; pIter->input[0] = (TsdbNextRowState){&pIter->memRow, true, false, &pIter->memState, getNextRowFromMem, NULL}; pIter->input[1] = (TsdbNextRowState){&pIter->imemRow, true, false, &pIter->imemState, getNextRowFromMem, NULL}; - pIter->input[2] = + pIter->input[2] = (TsdbNextRowState){&pIter->fsLastRow, false, true, &pIter->fsLastState, getNextRowFromFSLast, + clearNextRowFromFSLast}; + pIter->input[3] = (TsdbNextRowState){&pIter->fsRow, false, true, &pIter->fsState, getNextRowFromFS, clearNextRowFromFS}; if (pMem) { @@ -814,7 +1014,7 @@ _err: static int32_t nextRowIterClose(CacheNextRowIter *pIter) { int code = 0; - for (int i = 0; i < 3; ++i) { + for (int i = 0; i < 4; ++i) { if (pIter->input[i].nextRowClearFn) { pIter->input[i].nextRowClearFn(pIter->input[i].iter); } @@ -826,7 +1026,6 @@ static int32_t nextRowIterClose(CacheNextRowIter *pIter) { tsdbUntakeReadSnap(pIter->pTsdb, pIter->pReadSnap); - return code; _err: return code; } @@ -835,7 +1034,7 @@ _err: static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow) { int code = 0; - for (int i = 0; i < 3; ++i) { + for (int i = 0; i < 4; ++i) { if (pIter->input[i].next && !pIter->input[i].stop) { code = pIter->input[i].nextRowFn(pIter->input[i].iter, &pIter->input[i].pRow); if (code) goto _err; @@ -847,18 +1046,18 @@ static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow) { } } - if (pIter->input[0].stop && pIter->input[1].stop && pIter->input[2].stop) { + if (pIter->input[0].stop && pIter->input[1].stop && pIter->input[2].stop && pIter->input[3].stop) { *ppRow = NULL; return code; } - // select maxpoint(s) from mem, imem, fs - TSDBROW *max[3] = {0}; - int iMax[3] = {-1, -1, -1}; + // select maxpoint(s) from mem, imem, fs and last + TSDBROW *max[4] = {0}; + int iMax[4] = {-1, -1, -1, -1}; int nMax = 0; TSKEY maxKey = TSKEY_MIN; - for (int i = 0; i < 3; ++i) { + for (int i = 0; i < 4; ++i) { if (!pIter->input[i].stop && pIter->input[i].pRow != NULL) { TSDBKEY key = TSDBROW_KEY(pIter->input[i].pRow); @@ -876,13 +1075,13 @@ static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow) { } // delete detection - TSDBROW *merge[3] = {0}; - int iMerge[3] = {-1, -1, -1}; + TSDBROW *merge[4] = {0}; + int iMerge[4] = {-1, -1, -1, -1}; int nMerge = 0; for (int i = 0; i < nMax; ++i) { - TSDBKEY maxKey = TSDBROW_KEY(max[i]); + TSDBKEY maxKey1 = TSDBROW_KEY(max[i]); - bool deleted = tsdbKeyDeleted(&maxKey, pIter->pSkyline, &pIter->iSkyline); + bool deleted = tsdbKeyDeleted(&maxKey1, pIter->pSkyline, &pIter->iSkyline); if (!deleted) { iMerge[nMerge] = iMax[i]; merge[nMerge++] = max[i]; @@ -918,7 +1117,7 @@ static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, STSRow **ppRo TSKEY lastRowTs = TSKEY_MAX; CacheNextRowIter iter = {0}; - nextRowIterOpen(&iter, uid, pTsdb); + nextRowIterOpen(&iter, uid, pTsdb, pTSchema); do { TSDBROW *pRow = NULL; @@ -1015,7 +1214,7 @@ static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) { TSKEY lastRowTs = TSKEY_MAX; CacheNextRowIter iter = {0}; - nextRowIterOpen(&iter, uid, pTsdb); + nextRowIterOpen(&iter, uid, pTsdb, pTSchema); do { TSDBROW *pRow = NULL; @@ -1100,29 +1299,40 @@ int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUH // getTableCacheKeyS(uid, "lr", key, &keyLen); getTableCacheKey(uid, 0, key, &keyLen); LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen); - if (h) { - } else { - STSRow *pRow = NULL; - bool dup = false; // which is always false for now - code = mergeLastRow(uid, pTsdb, &dup, &pRow); - // if table's empty or error, return code of -1 - if (code < 0 || pRow == NULL) { - if (!dup && pRow) { - taosMemoryFree(pRow); + if (!h) { + taosThreadMutexLock(&pTsdb->lruMutex); + + h = taosLRUCacheLookup(pCache, key, keyLen); + if (!h) { + STSRow *pRow = NULL; + bool dup = false; // which is always false for now + code = mergeLastRow(uid, pTsdb, &dup, &pRow); + // if table's empty or error, return code of -1 + if (code < 0 || pRow == NULL) { + if (!dup && pRow) { + taosMemoryFree(pRow); + } + + taosThreadMutexUnlock(&pTsdb->lruMutex); + + *handle = NULL; + + return 0; } - *handle = NULL; - return 0; - } + _taos_lru_deleter_t deleter = deleteTableCacheLastrow; + LRUStatus status = + taosLRUCacheInsert(pCache, key, keyLen, pRow, TD_ROW_LEN(pRow), deleter, NULL, TAOS_LRU_PRIORITY_LOW); + if (status != TAOS_LRU_STATUS_OK) { + code = -1; + } - _taos_lru_deleter_t deleter = deleteTableCacheLastrow; - LRUStatus status = - taosLRUCacheInsert(pCache, key, keyLen, pRow, TD_ROW_LEN(pRow), deleter, NULL, TAOS_LRU_PRIORITY_LOW); - if (status != TAOS_LRU_STATUS_OK) { - code = -1; - } + taosThreadMutexUnlock(&pTsdb->lruMutex); - h = taosLRUCacheLookup(pCache, key, keyLen); + h = taosLRUCacheLookup(pCache, key, keyLen); + } else { + taosThreadMutexUnlock(&pTsdb->lruMutex); + } } *handle = h; diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index 66843d9a2844c44e77e798ab47032ef75370a544..ea9a7ec7d9b3df80edbb1e5f93db5b2420f908e5 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -18,7 +18,7 @@ #include "tcommon.h" #include "tsdb.h" -typedef struct SLastrowReader { +typedef struct SCacheRowsReader { SVnode* pVnode; STSchema* pSchema; uint64_t uid; @@ -27,9 +27,9 @@ typedef struct SLastrowReader { int32_t type; int32_t tableIndex; // currently returned result tables SArray* pTableList; // table id list -} SLastrowReader; +} SCacheRowsReader; -static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReader, const int32_t* slotIds) { +static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SCacheRowsReader* pReader, const int32_t* slotIds) { ASSERT(pReader->numOfCols <= taosArrayGetSize(pBlock->pDataBlock)); int32_t numOfRows = pBlock->info.rows; @@ -61,8 +61,10 @@ static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReade pBlock->info.rows += 1; } -int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t numOfCols, void** pReader) { - SLastrowReader* p = taosMemoryCalloc(1, sizeof(SLastrowReader)); +int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t numOfCols, void** pReader) { + *pReader = NULL; + + SCacheRowsReader* p = taosMemoryCalloc(1, sizeof(SCacheRowsReader)); if (p == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -81,9 +83,17 @@ int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, p->pTableList = pTableIdList; p->transferBuf = taosMemoryCalloc(p->pSchema->numOfCols, POINTER_BYTES); + if (p->transferBuf == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) { if (IS_VAR_DATA_TYPE(p->pSchema->columns[i].type)) { p->transferBuf[i] = taosMemoryMalloc(p->pSchema->columns[i].bytes); + if (p->transferBuf[i] == NULL) { + tsdbCacherowsReaderClose(p); + return TSDB_CODE_OUT_OF_MEMORY; + } } } @@ -91,8 +101,8 @@ int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, return TSDB_CODE_SUCCESS; } -int32_t tsdbLastrowReaderClose(void* pReader) { - SLastrowReader* p = pReader; +int32_t tsdbCacherowsReaderClose(void* pReader) { + SCacheRowsReader* p = pReader; if (p->pSchema != NULL) { for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) { @@ -107,28 +117,56 @@ int32_t tsdbLastrowReaderClose(void* pReader) { return TSDB_CODE_SUCCESS; } -int32_t tsdbRetrieveLastRow(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, SArray* pTableUidList) { +static int32_t doExtractCacheRow(SCacheRowsReader* pr, SLRUCache* lruCache, uint64_t uid, STSRow** pRow, LRUHandle** h) { + int32_t code = TSDB_CODE_SUCCESS; + if ((pr->type & CACHESCAN_RETRIEVE_LAST_ROW) == CACHESCAN_RETRIEVE_LAST_ROW) { + code = tsdbCacheGetLastrowH(lruCache, uid, pr->pVnode->pTsdb, h); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + // no data in the table of Uid + if (*h != NULL) { + *pRow = (STSRow*)taosLRUCacheValue(lruCache, *h); + } + } else { + code = tsdbCacheGetLastH(lruCache, uid, pr->pVnode->pTsdb, h); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + // no data in the table of Uid + if (*h != NULL) { + SArray* pLast = (SArray*)taosLRUCacheValue(lruCache, *h); + tsdbCacheLastArray2Row(pLast, pRow, pr->pSchema); + } + } + + return code; +} + +int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, SArray* pTableUidList) { if (pReader == NULL || pResBlock == NULL) { return TSDB_CODE_INVALID_PARA; } - SLastrowReader* pr = pReader; + SCacheRowsReader* pr = pReader; + int32_t code = TSDB_CODE_SUCCESS; SLRUCache* lruCache = pr->pVnode->pTsdb->lruCache; LRUHandle* h = NULL; STSRow* pRow = NULL; size_t numOfTables = taosArrayGetSize(pr->pTableList); // retrieve the only one last row of all tables in the uid list. - if (pr->type == LASTROW_RETRIEVE_TYPE_SINGLE) { + if ((pr->type & CACHESCAN_RETRIEVE_TYPE_SINGLE) == CACHESCAN_RETRIEVE_TYPE_SINGLE) { int64_t lastKey = INT64_MIN; bool internalResult = false; for (int32_t i = 0; i < numOfTables; ++i) { STableKeyInfo* pKeyInfo = taosArrayGet(pr->pTableList, i); - int32_t code = tsdbCacheGetLastrowH(lruCache, pKeyInfo->uid, pr->pVnode->pTsdb, &h); - // int32_t code = tsdbCacheGetLastH(lruCache, pKeyInfo->uid, pr->pVnode->pTsdb, &h); - if (code != TSDB_CODE_SUCCESS) { + code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h); + if (code != TSDB_CODE_SUCCESS) { return code; } @@ -136,9 +174,6 @@ int32_t tsdbRetrieveLastRow(void* pReader, SSDataBlock* pResBlock, const int32_t continue; } - pRow = (STSRow*)taosLRUCacheValue(lruCache, h); - // SArray* pLast = (SArray*)taosLRUCacheValue(lruCache, h); - // tsdbCacheLastArray2Row(pLast, &pRow, pr->pSchema); if (pRow->ts > lastKey) { // Set result row into the same rowIndex repeatly, so we need to check if the internal result row has already // appended or not. @@ -155,25 +190,18 @@ int32_t tsdbRetrieveLastRow(void* pReader, SSDataBlock* pResBlock, const int32_t tsdbCacheRelease(lruCache, h); } - } else if (pr->type == LASTROW_RETRIEVE_TYPE_ALL) { + } else if ((pr->type & CACHESCAN_RETRIEVE_TYPE_ALL) == CACHESCAN_RETRIEVE_TYPE_ALL) { for (int32_t i = pr->tableIndex; i < numOfTables; ++i) { STableKeyInfo* pKeyInfo = taosArrayGet(pr->pTableList, i); - - int32_t code = tsdbCacheGetLastrowH(lruCache, pKeyInfo->uid, pr->pVnode->pTsdb, &h); - // int32_t code = tsdbCacheGetLastH(lruCache, pKeyInfo->uid, pr->pVnode->pTsdb, &h); - if (code != TSDB_CODE_SUCCESS) { + code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h); + if (code != TSDB_CODE_SUCCESS) { return code; } - // no data in the table of Uid if (h == NULL) { continue; } - pRow = (STSRow*)taosLRUCacheValue(lruCache, h); - // SArray* pLast = (SArray*)taosLRUCacheValue(lruCache, h); - // tsdbCacheLastArray2Row(pLast, &pRow, pr->pSchema); - saveOneRow(pRow, pResBlock, pr, slotIds); taosArrayPush(pTableUidList, &pKeyInfo->uid); diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c index 6e25166203948abe314e2405af77597ee9032a38..04a6de8472ade43b3e7e2b420f8e2d0c0656a2c8 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c @@ -20,6 +20,12 @@ typedef struct { STSchema *pTSchema; } SSkmInfo; +typedef struct { + int64_t suid; + int64_t uid; + TSDBROW row; +} SRowInfo; + typedef struct { STsdb *pTsdb; /* commit data */ @@ -29,6 +35,7 @@ typedef struct { int32_t minRow; int32_t maxRow; int8_t cmprAlg; + SArray *aTbDataP; STsdbFS fs; // -------------- TSKEY nextKey; // reset by each table commit @@ -38,15 +45,27 @@ typedef struct { // commit file data struct { SDataFReader *pReader; - SArray *aBlockIdx; // SArray - SMapData mBlock; // SMapData, read from reader - SBlockData bData; + // data + SArray *aBlockIdx; // SArray + int32_t iBlockIdx; + SBlockIdx *pBlockIdx; + SMapData mBlock; // SMapData + SBlockData bData; + // last + SArray *aBlockL; // SArray + int32_t iBlockL; + SBlockData bDatal; + int32_t iRow; + SRowInfo *pRowInfo; + SRowInfo rowInfo; } dReader; struct { SDataFWriter *pWriter; SArray *aBlockIdx; // SArray + SArray *aBlockL; // SArray SMapData mBlock; // SMapData SBlockData bData; + SBlockData bDatal; } dWriter; SSkmInfo skmTable; SSkmInfo skmRow; @@ -162,10 +181,10 @@ static int32_t tsdbCommitDelStart(SCommitter *pCommitter) { SDelFile *pDelFileR = pCommitter->fs.pDelFile; if (pDelFileR) { - code = tsdbDelFReaderOpen(&pCommitter->pDelFReader, pDelFileR, pTsdb, NULL); + code = tsdbDelFReaderOpen(&pCommitter->pDelFReader, pDelFileR, pTsdb); if (code) goto _err; - code = tsdbReadDelIdx(pCommitter->pDelFReader, pCommitter->aDelIdx, NULL); + code = tsdbReadDelIdx(pCommitter->pDelFReader, pCommitter->aDelIdx); if (code) goto _err; } @@ -202,7 +221,7 @@ static int32_t tsdbCommitTableDel(SCommitter *pCommitter, STbData *pTbData, SDel suid = pDelIdx->suid; uid = pDelIdx->uid; - code = tsdbReadDelData(pCommitter->pDelFReader, pDelIdx, pCommitter->aDelData, NULL); + code = tsdbReadDelData(pCommitter->pDelFReader, pDelIdx, pCommitter->aDelData); if (code) goto _err; } else { taosArrayClear(pCommitter->aDelData); @@ -222,7 +241,7 @@ static int32_t tsdbCommitTableDel(SCommitter *pCommitter, STbData *pTbData, SDel } // write - code = tsdbWriteDelData(pCommitter->pDelFWriter, pCommitter->aDelData, NULL, &delIdx); + code = tsdbWriteDelData(pCommitter->pDelFWriter, pCommitter->aDelData, &delIdx); if (code) goto _err; // put delIdx @@ -243,7 +262,7 @@ static int32_t tsdbCommitDelEnd(SCommitter *pCommitter) { int32_t code = 0; STsdb *pTsdb = pCommitter->pTsdb; - code = tsdbWriteDelIdx(pCommitter->pDelFWriter, pCommitter->aDelIdxN, NULL); + code = tsdbWriteDelIdx(pCommitter->pDelFWriter, pCommitter->aDelIdxN); if (code) goto _err; code = tsdbUpdateDelFileHdr(pCommitter->pDelFWriter); @@ -271,44 +290,181 @@ _err: return code; } +static int32_t tsdbCommitterUpdateTableSchema(SCommitter *pCommitter, int64_t suid, int64_t uid) { + int32_t code = 0; + + if (suid) { + if (pCommitter->skmTable.suid == suid) goto _exit; + } else { + if (pCommitter->skmTable.uid == uid) goto _exit; + } + + pCommitter->skmTable.suid = suid; + pCommitter->skmTable.uid = uid; + tTSchemaDestroy(pCommitter->skmTable.pTSchema); + code = metaGetTbTSchemaEx(pCommitter->pTsdb->pVnode->pMeta, suid, uid, -1, &pCommitter->skmTable.pTSchema); + if (code) goto _exit; + +_exit: + return code; +} + +static int32_t tsdbCommitterUpdateRowSchema(SCommitter *pCommitter, int64_t suid, int64_t uid, int32_t sver) { + int32_t code = 0; + + if (pCommitter->skmRow.pTSchema) { + if (pCommitter->skmRow.suid == suid) { + if (suid == 0) { + if (pCommitter->skmRow.uid == uid && sver == pCommitter->skmRow.pTSchema->version) goto _exit; + } else { + if (sver == pCommitter->skmRow.pTSchema->version) goto _exit; + } + } + } + + pCommitter->skmRow.suid = suid; + pCommitter->skmRow.uid = uid; + tTSchemaDestroy(pCommitter->skmRow.pTSchema); + code = metaGetTbTSchemaEx(pCommitter->pTsdb->pVnode->pMeta, suid, uid, sver, &pCommitter->skmRow.pTSchema); + if (code) { + goto _exit; + } + +_exit: + return code; +} + +static int32_t tsdbCommitterNextLastRow(SCommitter *pCommitter) { + int32_t code = 0; + + ASSERT(pCommitter->dReader.pReader); + ASSERT(pCommitter->dReader.pRowInfo); + + SBlockData *pBlockDatal = &pCommitter->dReader.bDatal; + pCommitter->dReader.iRow++; + if (pCommitter->dReader.iRow < pBlockDatal->nRow) { + if (pBlockDatal->uid) { + pCommitter->dReader.pRowInfo->uid = pBlockDatal->uid; + } else { + pCommitter->dReader.pRowInfo->uid = pBlockDatal->aUid[pCommitter->dReader.iRow]; + } + pCommitter->dReader.pRowInfo->row = tsdbRowFromBlockData(pBlockDatal, pCommitter->dReader.iRow); + } else { + pCommitter->dReader.iBlockL++; + if (pCommitter->dReader.iBlockL < taosArrayGetSize(pCommitter->dReader.aBlockL)) { + SBlockL *pBlockL = (SBlockL *)taosArrayGet(pCommitter->dReader.aBlockL, pCommitter->dReader.iBlockL); + int64_t suid = pBlockL->suid; + int64_t uid = pBlockL->maxUid; + + code = tsdbCommitterUpdateTableSchema(pCommitter, suid, uid); + if (code) goto _exit; + + code = tBlockDataInit(pBlockDatal, suid, suid ? 0 : uid, pCommitter->skmTable.pTSchema); + if (code) goto _exit; + + code = tsdbReadLastBlock(pCommitter->dReader.pReader, pBlockL, pBlockDatal); + if (code) goto _exit; + + pCommitter->dReader.iRow = 0; + pCommitter->dReader.pRowInfo->suid = pBlockDatal->suid; + if (pBlockDatal->uid) { + pCommitter->dReader.pRowInfo->uid = pBlockDatal->uid; + } else { + pCommitter->dReader.pRowInfo->uid = pBlockDatal->aUid[0]; + } + pCommitter->dReader.pRowInfo->row = tsdbRowFromBlockData(pBlockDatal, pCommitter->dReader.iRow); + } else { + pCommitter->dReader.pRowInfo = NULL; + } + } + +_exit: + return code; +} + +static int32_t tsdbCommitterNextTableData(SCommitter *pCommitter) { + int32_t code = 0; + + ASSERT(pCommitter->dReader.pBlockIdx); + + pCommitter->dReader.iBlockIdx++; + if (pCommitter->dReader.iBlockIdx < taosArrayGetSize(pCommitter->dReader.aBlockIdx)) { + pCommitter->dReader.pBlockIdx = + (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, pCommitter->dReader.iBlockIdx); + + code = tsdbReadBlock(pCommitter->dReader.pReader, pCommitter->dReader.pBlockIdx, &pCommitter->dReader.mBlock); + if (code) goto _exit; + + ASSERT(pCommitter->dReader.mBlock.nItem > 0); + } else { + pCommitter->dReader.pBlockIdx = NULL; + } + +_exit: + return code; +} + static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) { int32_t code = 0; STsdb *pTsdb = pCommitter->pTsdb; SDFileSet *pRSet = NULL; // memory + pCommitter->commitFid = tsdbKeyFid(pCommitter->nextKey, pCommitter->minutes, pCommitter->precision); + tsdbFidKeyRange(pCommitter->commitFid, pCommitter->minutes, pCommitter->precision, &pCommitter->minKey, + &pCommitter->maxKey); pCommitter->nextKey = TSKEY_MAX; - // old - taosArrayClear(pCommitter->dReader.aBlockIdx); - tMapDataReset(&pCommitter->dReader.mBlock); - tBlockDataReset(&pCommitter->dReader.bData); + // Reader pRSet = (SDFileSet *)taosArraySearch(pCommitter->fs.aDFileSet, &(SDFileSet){.fid = pCommitter->commitFid}, tDFileSetCmprFn, TD_EQ); if (pRSet) { code = tsdbDataFReaderOpen(&pCommitter->dReader.pReader, pTsdb, pRSet); if (code) goto _err; - code = tsdbReadBlockIdx(pCommitter->dReader.pReader, pCommitter->dReader.aBlockIdx, NULL); + // data + code = tsdbReadBlockIdx(pCommitter->dReader.pReader, pCommitter->dReader.aBlockIdx); if (code) goto _err; + + pCommitter->dReader.iBlockIdx = 0; + if (pCommitter->dReader.iBlockIdx < taosArrayGetSize(pCommitter->dReader.aBlockIdx)) { + pCommitter->dReader.pBlockIdx = + (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, pCommitter->dReader.iBlockIdx); + + code = tsdbReadBlock(pCommitter->dReader.pReader, pCommitter->dReader.pBlockIdx, &pCommitter->dReader.mBlock); + if (code) goto _err; + } else { + pCommitter->dReader.pBlockIdx = NULL; + } + tBlockDataReset(&pCommitter->dReader.bData); + + // last + code = tsdbReadBlockL(pCommitter->dReader.pReader, pCommitter->dReader.aBlockL); + if (code) goto _err; + + pCommitter->dReader.iBlockL = -1; + pCommitter->dReader.iRow = -1; + pCommitter->dReader.pRowInfo = &pCommitter->dReader.rowInfo; + tBlockDataReset(&pCommitter->dReader.bDatal); + code = tsdbCommitterNextLastRow(pCommitter); + if (code) goto _err; + } else { + pCommitter->dReader.pBlockIdx = NULL; + pCommitter->dReader.pRowInfo = NULL; } - // new + // Writer SHeadFile fHead; SDataFile fData; SLastFile fLast; SSmaFile fSma; SDFileSet wSet = {.pHeadF = &fHead, .pDataF = &fData, .pLastF = &fLast, .pSmaF = &fSma}; - - taosArrayClear(pCommitter->dWriter.aBlockIdx); - tMapDataReset(&pCommitter->dWriter.mBlock); - tBlockDataReset(&pCommitter->dWriter.bData); if (pRSet) { wSet.diskId = pRSet->diskId; wSet.fid = pCommitter->commitFid; - fHead = (SHeadFile){.commitID = pCommitter->commitID, .offset = 0, .size = 0}; + fHead = (SHeadFile){.commitID = pCommitter->commitID, .size = 0, .offset = 0}; fData = *pRSet->pDataF; - fLast = (SLastFile){.commitID = pCommitter->commitID, .size = 0}; + fLast = (SLastFile){.commitID = pCommitter->commitID, .size = 0, .offset = 0}; fSma = *pRSet->pSmaF; } else { SDiskID did = {0}; @@ -319,14 +475,20 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) { wSet.diskId = did; wSet.fid = pCommitter->commitFid; - fHead = (SHeadFile){.commitID = pCommitter->commitID, .offset = 0, .size = 0}; + fHead = (SHeadFile){.commitID = pCommitter->commitID, .size = 0, .offset = 0}; fData = (SDataFile){.commitID = pCommitter->commitID, .size = 0}; - fLast = (SLastFile){.commitID = pCommitter->commitID, .size = 0}; + fLast = (SLastFile){.commitID = pCommitter->commitID, .size = 0, .offset = 0}; fSma = (SSmaFile){.commitID = pCommitter->commitID, .size = 0}; } code = tsdbDataFWriterOpen(&pCommitter->dWriter.pWriter, pTsdb, &wSet); if (code) goto _err; + taosArrayClear(pCommitter->dWriter.aBlockIdx); + taosArrayClear(pCommitter->dWriter.aBlockL); + tMapDataReset(&pCommitter->dWriter.mBlock); + tBlockDataReset(&pCommitter->dWriter.bData); + tBlockDataReset(&pCommitter->dWriter.bDatal); + _exit: return code; @@ -335,200 +497,192 @@ _err: return code; } -static int32_t tsdbCommitterUpdateTableSchema(SCommitter *pCommitter, int64_t suid, int64_t uid, int32_t sver) { - int32_t code = 0; - - if (pCommitter->skmTable.pTSchema) { - if (pCommitter->skmTable.suid == suid) { - if (suid == 0) { - if (pCommitter->skmTable.uid == uid && sver == pCommitter->skmTable.pTSchema->version) goto _exit; - } else { - if (sver == pCommitter->skmTable.pTSchema->version) goto _exit; - } - } - } +static int32_t tsdbCommitDataBlock(SCommitter *pCommitter, SBlock *pBlock) { + int32_t code = 0; + SBlockData *pBlockData = &pCommitter->dWriter.bData; + SBlock block; - pCommitter->skmTable.suid = suid; - pCommitter->skmTable.uid = uid; - tTSchemaDestroy(pCommitter->skmTable.pTSchema); - code = metaGetTbTSchemaEx(pCommitter->pTsdb->pVnode->pMeta, suid, uid, sver, &pCommitter->skmTable.pTSchema); - if (code) goto _exit; + ASSERT(pBlockData->nRow > 0); -_exit: - return code; -} + if (pBlock) { + block = *pBlock; // as a subblock + } else { + tBlockReset(&block); // as a new block + } -static int32_t tsdbCommitterUpdateRowSchema(SCommitter *pCommitter, int64_t suid, int64_t uid, int32_t sver) { - int32_t code = 0; + // info + block.nRow += pBlockData->nRow; + for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) { + TSDBKEY key = {.ts = pBlockData->aTSKEY[iRow], .version = pBlockData->aVersion[iRow]}; - if (pCommitter->skmRow.pTSchema) { - if (pCommitter->skmRow.suid == suid) { - if (suid == 0) { - if (pCommitter->skmRow.uid == uid && sver == pCommitter->skmRow.pTSchema->version) goto _exit; - } else { - if (sver == pCommitter->skmRow.pTSchema->version) goto _exit; + if (iRow == 0) { + if (tsdbKeyCmprFn(&block.minKey, &key) > 0) { + block.minKey = key; + } + } else { + if (pBlockData->aTSKEY[iRow] == pBlockData->aTSKEY[iRow - 1]) { + block.hasDup = 1; } } - } - pCommitter->skmRow.suid = suid; - pCommitter->skmRow.uid = uid; - tTSchemaDestroy(pCommitter->skmRow.pTSchema); - code = metaGetTbTSchemaEx(pCommitter->pTsdb->pVnode->pMeta, suid, uid, sver, &pCommitter->skmRow.pTSchema); - if (code) { - goto _exit; + if (iRow == pBlockData->nRow - 1 && tsdbKeyCmprFn(&block.maxKey, &key) < 0) { + block.maxKey = key; + } + + block.minVer = TMIN(block.minVer, key.version); + block.maxVer = TMAX(block.maxVer, key.version); } -_exit: + // write + block.nSubBlock++; + code = tsdbWriteBlockData(pCommitter->dWriter.pWriter, pBlockData, &block.aSubBlock[block.nSubBlock - 1], + ((block.nSubBlock == 1) && !block.hasDup) ? &block.smaInfo : NULL, pCommitter->cmprAlg, 0); + if (code) goto _err; + + // put SBlock + code = tMapDataPutItem(&pCommitter->dWriter.mBlock, &block, tPutBlock); + if (code) goto _err; + + // clear + tBlockDataClear(pBlockData); + return code; -} -static int32_t tsdbCommitBlockData(SCommitter *pCommitter, SBlockData *pBlockData, SBlock *pBlock, SBlockIdx *pBlockIdx, - int8_t toDataOnly) { - int32_t code = 0; +_err: + tsdbError("vgId:%d tsdb commit data block failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code)); + return code; +} - if (pBlock->nSubBlock == 0) { - if (!toDataOnly && pBlockData->nRow < pCommitter->minRow) { - pBlock->last = 1; - } else { - pBlock->last = 0; - } +static int32_t tsdbCommitLastBlock(SCommitter *pCommitter) { + int32_t code = 0; + SBlockL blockL; + SBlockData *pBlockData = &pCommitter->dWriter.bDatal; + + ASSERT(pBlockData->nRow > 0); + + // info + blockL.suid = pBlockData->suid; + blockL.nRow = pBlockData->nRow; + blockL.minKey = TSKEY_MAX; + blockL.maxKey = TSKEY_MIN; + blockL.minVer = VERSION_MAX; + blockL.maxVer = VERSION_MIN; + for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) { + blockL.minKey = TMIN(blockL.minKey, pBlockData->aTSKEY[iRow]); + blockL.maxKey = TMAX(blockL.maxKey, pBlockData->aTSKEY[iRow]); + blockL.minVer = TMIN(blockL.minVer, pBlockData->aVersion[iRow]); + blockL.maxVer = TMAX(blockL.maxVer, pBlockData->aVersion[iRow]); } + blockL.minUid = pBlockData->uid ? pBlockData->uid : pBlockData->aUid[0]; + blockL.maxUid = pBlockData->uid ? pBlockData->uid : pBlockData->aUid[pBlockData->nRow - 1]; - code = - tsdbWriteBlockData(pCommitter->dWriter.pWriter, pBlockData, NULL, NULL, pBlockIdx, pBlock, pCommitter->cmprAlg); + // write + code = tsdbWriteBlockData(pCommitter->dWriter.pWriter, pBlockData, &blockL.bInfo, NULL, pCommitter->cmprAlg, 1); if (code) goto _err; - code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pBlock, tPutBlock); - if (code) goto _err; + // push SBlockL + if (taosArrayPush(pCommitter->dWriter.aBlockL, &blockL) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + + // clear + tBlockDataClear(pBlockData); return code; _err: + tsdbError("vgId:%d tsdb commit last block failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code)); return code; } -static int32_t tsdbMergeTableData(SCommitter *pCommitter, STbDataIter *pIter, SBlock *pBlockMerge, TSDBKEY toKey, - int8_t toDataOnly) { +static int32_t tsdbMergeCommitData(SCommitter *pCommitter, STbDataIter *pIter, SBlock *pBlock) { int32_t code = 0; - SBlockIdx *pBlockIdx = &(SBlockIdx){.suid = pIter->pTbData->suid, .uid = pIter->pTbData->uid}; - SBlockData *pBlockDataMerge = &pCommitter->dReader.bData; - SBlockData *pBlockData = &pCommitter->dWriter.bData; - SBlock block; - SBlock *pBlock = █ - TSDBROW *pRow1; - TSDBROW row2; - TSDBROW *pRow2 = &row2; + STbData *pTbData = pIter->pTbData; + SBlockData *pBlockDataR = &pCommitter->dReader.bData; + SBlockData *pBlockDataW = &pCommitter->dWriter.bData; - // read SBlockData - code = tsdbReadBlockData(pCommitter->dReader.pReader, pBlockIdx, pBlockMerge, pBlockDataMerge, NULL, NULL); + code = tsdbReadDataBlock(pCommitter->dReader.pReader, pBlock, pBlockDataR); if (code) goto _err; - code = tBlockDataSetSchema(pBlockData, pCommitter->skmTable.pTSchema); - if (code) goto _err; + tBlockDataClear(pBlockDataW); + int32_t iRow = 0; + TSDBROW row; + TSDBROW *pRow1 = tsdbTbDataIterGet(pIter); + TSDBROW *pRow2 = &row; + *pRow2 = tsdbRowFromBlockData(pBlockDataR, iRow); + while (pRow1 && pRow2) { + int32_t c = tsdbRowCmprFn(pRow1, pRow2); + + if (c < 0) { + code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow1)); + if (code) goto _err; - // loop to merge - pRow1 = tsdbTbDataIterGet(pIter); - *pRow2 = tsdbRowFromBlockData(pBlockDataMerge, 0); - ASSERT(pRow1 && tsdbKeyCmprFn(&TSDBROW_KEY(pRow1), &toKey) < 0); - ASSERT(tsdbKeyCmprFn(&TSDBROW_KEY(pRow2), &toKey) < 0); - code = tsdbCommitterUpdateRowSchema(pCommitter, pBlockIdx->suid, pBlockIdx->uid, TSDBROW_SVERSION(pRow1)); - if (code) goto _err; + code = tBlockDataAppendRow(pBlockDataW, pRow1, pCommitter->skmRow.pTSchema, pTbData->uid); + if (code) goto _err; - tBlockReset(pBlock); - tBlockDataClearData(pBlockData); - while (true) { - if (pRow1 == NULL && pRow2 == NULL) { - if (pBlockData->nRow == 0) { - break; - } else { - goto _write_block; - } - } + // next + tsdbTbDataIterNext(pIter); + pRow1 = tsdbTbDataIterGet(pIter); + } else if (c > 0) { + code = tBlockDataAppendRow(pBlockDataW, pRow2, NULL, pTbData->uid); + if (code) goto _err; - if (pRow1 && pRow2) { - int32_t c = tsdbRowCmprFn(pRow1, pRow2); - if (c < 0) { - goto _append_mem_row; - } else if (c > 0) { - goto _append_block_row; + iRow++; + if (iRow < pBlockDataR->nRow) { + *pRow2 = tsdbRowFromBlockData(pBlockDataR, iRow); } else { - ASSERT(0); + pRow2 = NULL; } - } else if (pRow1) { - goto _append_mem_row; } else { - goto _append_block_row; - } - - _append_mem_row: - code = tBlockDataAppendRow(pBlockData, pRow1, pCommitter->skmRow.pTSchema); - if (code) goto _err; - - tsdbTbDataIterNext(pIter); - pRow1 = tsdbTbDataIterGet(pIter); - if (pRow1) { - if (tsdbKeyCmprFn(&TSDBROW_KEY(pRow1), &toKey) < 0) { - code = tsdbCommitterUpdateRowSchema(pCommitter, pBlockIdx->suid, pBlockIdx->uid, TSDBROW_SVERSION(pRow1)); - if (code) goto _err; - } else { - pRow1 = NULL; - } + ASSERT(0); } - if (pBlockData->nRow >= pCommitter->maxRow * 4 / 5) { - goto _write_block; - } else { - continue; + // check + if (pBlockDataW->nRow >= pCommitter->maxRow * 4 / 5) { + code = tsdbCommitDataBlock(pCommitter, NULL); + if (code) goto _err; } + } - _append_block_row: - code = tBlockDataAppendRow(pBlockData, pRow2, NULL); + while (pRow2) { + code = tBlockDataAppendRow(pBlockDataW, pRow2, NULL, pTbData->uid); if (code) goto _err; - if (pRow2->iRow + 1 < pBlockDataMerge->nRow) { - *pRow2 = tsdbRowFromBlockData(pBlockDataMerge, pRow2->iRow + 1); + iRow++; + if (iRow < pBlockDataR->nRow) { + *pRow2 = tsdbRowFromBlockData(pBlockDataR, iRow); } else { pRow2 = NULL; } - if (pBlockData->nRow >= pCommitter->maxRow * 4 / 5) { - goto _write_block; - } else { - continue; + // check + if (pBlockDataW->nRow >= pCommitter->maxRow * 4 / 5) { + code = tsdbCommitDataBlock(pCommitter, NULL); + if (code) goto _err; } + } - _write_block: - code = tsdbCommitBlockData(pCommitter, pBlockData, pBlock, pBlockIdx, toDataOnly); + // check + if (pBlockDataW->nRow > 0) { + code = tsdbCommitDataBlock(pCommitter, NULL); if (code) goto _err; - - tBlockReset(pBlock); - tBlockDataClearData(pBlockData); } return code; _err: - tsdbError("vgId:%d, tsdb merge block and mem failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code)); + tsdbError("vgId:%d, tsdb merge commit data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code)); return code; } -static int32_t tsdbCommitTableMemData(SCommitter *pCommitter, STbDataIter *pIter, TSDBKEY toKey, int8_t toDataOnly) { +static int32_t tsdbCommitTableMemData(SCommitter *pCommitter, STbDataIter *pIter, TSDBKEY toKey) { int32_t code = 0; - TSDBROW *pRow; - SBlock block; - SBlock *pBlock = █ + STbData *pTbData = pIter->pTbData; SBlockData *pBlockData = &pCommitter->dWriter.bData; - int64_t suid = pIter->pTbData->suid; - int64_t uid = pIter->pTbData->uid; - - code = tBlockDataSetSchema(pBlockData, pCommitter->skmTable.pTSchema); - if (code) goto _err; - tBlockReset(pBlock); - tBlockDataClearData(pBlockData); - pRow = tsdbTbDataIterGet(pIter); - ASSERT(pRow && tsdbKeyCmprFn(&TSDBROW_KEY(pRow), &toKey) < 0); + tBlockDataClear(pBlockData); + TSDBROW *pRow = tsdbTbDataIterGet(pIter); while (true) { if (pRow == NULL) { if (pBlockData->nRow > 0) { @@ -539,33 +693,27 @@ static int32_t tsdbCommitTableMemData(SCommitter *pCommitter, STbDataIter *pIter } // update schema - code = tsdbCommitterUpdateRowSchema(pCommitter, suid, uid, TSDBROW_SVERSION(pRow)); + code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow)); if (code) goto _err; // append - code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema); + code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema, pTbData->uid); if (code) goto _err; tsdbTbDataIterNext(pIter); pRow = tsdbTbDataIterGet(pIter); - // if (pRow && tsdbKeyCmprFn(&TSDBROW_KEY(pRow), &toKey) >= 0) pRow = NULL; - // crash on CI, use the block following if (pRow) { - TSDBKEY tmpKey = TSDBROW_KEY(pRow); - if (tsdbKeyCmprFn(&tmpKey, &toKey) >= 0) { + TSDBKEY rowKey = TSDBROW_KEY(pRow); + if (tsdbKeyCmprFn(&rowKey, &toKey) >= 0) { pRow = NULL; } } - if (pBlockData->nRow >= pCommitter->maxRow * 4 / 5) goto _write_block; - continue; - - _write_block: - code = tsdbCommitBlockData(pCommitter, pBlockData, pBlock, &(SBlockIdx){.suid = suid, .uid = uid}, toDataOnly); - if (code) goto _err; - - tBlockReset(pBlock); - tBlockDataClearData(pBlockData); + if (pBlockData->nRow >= pCommitter->maxRow * 4 / 5) { + _write_block: + code = tsdbCommitDataBlock(pCommitter, NULL); + if (code) goto _err; + } } return code; @@ -575,65 +723,16 @@ _err: return code; } -static int32_t tsdbCommitTableDiskData(SCommitter *pCommitter, SBlock *pBlock, SBlockIdx *pBlockIdx) { - int32_t code = 0; - SBlock block; - - if (pBlock->last) { - code = tsdbReadBlockData(pCommitter->dReader.pReader, pBlockIdx, pBlock, &pCommitter->dReader.bData, NULL, NULL); - if (code) goto _err; - - tBlockReset(&block); - code = tsdbCommitBlockData(pCommitter, &pCommitter->dReader.bData, &block, pBlockIdx, 0); - if (code) goto _err; - } else { - code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pBlock, tPutBlock); - if (code) goto _err; - } - - return code; - -_err: - tsdbError("vgId:%d, tsdb commit table disk data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code)); - return code; -} +static int32_t tsdbGetNumOfRowsLessThan(STbDataIter *pIter, TSDBKEY key) { + int32_t nRow = 0; -static int32_t tsdbCommitTableDataEnd(SCommitter *pCommitter, int64_t suid, int64_t uid) { - int32_t code = 0; - SBlockIdx blockIdx = {.suid = suid, .uid = uid}; - SBlockIdx *pBlockIdx = &blockIdx; - - code = tsdbWriteBlock(pCommitter->dWriter.pWriter, &pCommitter->dWriter.mBlock, NULL, pBlockIdx); - if (code) goto _err; - - if (taosArrayPush(pCommitter->dWriter.aBlockIdx, pBlockIdx) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } - - return code; - -_err: - tsdbError("vgId:%d, commit table data end failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code)); - return code; -} - -static int32_t tsdbGetOvlpNRow(STbDataIter *pIter, SBlock *pBlock) { - int32_t nRow = 0; - TSDBROW *pRow; - TSDBKEY key; - int32_t c = 0; STbDataIter iter = *pIter; - - iter.pRow = NULL; while (true) { - pRow = tsdbTbDataIterGet(&iter); - + TSDBROW *pRow = tsdbTbDataIterGet(&iter); if (pRow == NULL) break; - key = TSDBROW_KEY(pRow); - c = tBlockCmprFn(&(SBlock){.maxKey = key, .minKey = key}, pBlock); - if (c == 0) { + int32_t c = tsdbKeyCmprFn(&TSDBROW_KEY(pRow), &key); + if (c < 0) { nRow++; tsdbTbDataIterNext(&iter); } else if (c > 0) { @@ -648,42 +747,33 @@ static int32_t tsdbGetOvlpNRow(STbDataIter *pIter, SBlock *pBlock) { static int32_t tsdbMergeAsSubBlock(SCommitter *pCommitter, STbDataIter *pIter, SBlock *pBlock) { int32_t code = 0; + STbData *pTbData = pIter->pTbData; SBlockData *pBlockData = &pCommitter->dWriter.bData; - SBlockIdx *pBlockIdx = &(SBlockIdx){.suid = pIter->pTbData->suid, .uid = pIter->pTbData->uid}; - SBlock block; - TSDBROW *pRow; - - code = tBlockDataSetSchema(pBlockData, pCommitter->skmTable.pTSchema); - if (code) goto _err; - pRow = tsdbTbDataIterGet(pIter); - code = tsdbCommitterUpdateRowSchema(pCommitter, pBlockIdx->suid, pBlockIdx->uid, TSDBROW_SVERSION(pRow)); - if (code) goto _err; + tBlockDataClear(pBlockData); + TSDBROW *pRow = tsdbTbDataIterGet(pIter); while (true) { if (pRow == NULL) break; - code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema); + + code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow)); + if (code) goto _err; + + code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema, pTbData->uid); if (code) goto _err; tsdbTbDataIterNext(pIter); pRow = tsdbTbDataIterGet(pIter); if (pRow) { - TSDBKEY key = TSDBROW_KEY(pRow); - int32_t c = tBlockCmprFn(&(SBlock){.minKey = key, .maxKey = key}, pBlock); - - if (c == 0) { - code = - tsdbCommitterUpdateRowSchema(pCommitter, pIter->pTbData->suid, pIter->pTbData->uid, TSDBROW_SVERSION(pRow)); - if (code) goto _err; - } else if (c > 0) { + TSDBKEY rowKey = TSDBROW_KEY(pRow); + if (tsdbKeyCmprFn(&rowKey, &pBlock->maxKey) > 0) { pRow = NULL; - } else { - ASSERT(0); } } } - block = *pBlock; - code = tsdbCommitBlockData(pCommitter, pBlockData, &block, pBlockIdx, 0); + ASSERT(pBlockData->nRow > 0 && pBlock->nRow + pBlockData->nRow <= pCommitter->maxRow); + + code = tsdbCommitDataBlock(pCommitter, pBlock); if (code) goto _err; return code; @@ -693,176 +783,323 @@ _err: return code; } -static int32_t tsdbCommitTableData(SCommitter *pCommitter, STbData *pTbData, SBlockIdx *pBlockIdx) { - int32_t code = 0; - STbDataIter iter = {0}; - STbDataIter *pIter = &iter; - TSDBROW *pRow; - int32_t iBlock; - int32_t nBlock; - int64_t suid; - int64_t uid; +static int32_t tsdbMergeCommitLast(SCommitter *pCommitter, STbDataIter *pIter) { + int32_t code = 0; + STbData *pTbData = pIter->pTbData; + int32_t nRow = tsdbGetNumOfRowsLessThan(pIter, (TSDBKEY){.ts = pCommitter->maxKey + 1, .version = VERSION_MIN}); - if (pTbData) { - tsdbTbDataIterOpen(pTbData, &(TSDBKEY){.ts = pCommitter->minKey, .version = VERSION_MIN}, 0, pIter); - pRow = tsdbTbDataIterGet(pIter); - if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) pRow = NULL; + if (pCommitter->dReader.pRowInfo && tTABLEIDCmprFn(pTbData, pCommitter->dReader.pRowInfo) == 0) { + if (pCommitter->dReader.pRowInfo->suid) { // super table + for (int32_t iRow = pCommitter->dReader.iRow; iRow < pCommitter->dReader.bDatal.nRow; iRow++) { + if (pTbData->uid != pCommitter->dReader.bDatal.aUid[iRow]) break; + nRow++; + } + } else { // normal table + ASSERT(pCommitter->dReader.iRow == 0); + nRow += pCommitter->dReader.bDatal.nRow; + } + } - suid = pTbData->suid; - uid = pTbData->uid; - } else { - pIter = NULL; + if (nRow == 0) goto _exit; + + TSDBROW *pRow = tsdbTbDataIterGet(pIter); + if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) { pRow = NULL; } - if (pBlockIdx) { - code = tsdbReadBlock(pCommitter->dReader.pReader, pBlockIdx, &pCommitter->dReader.mBlock, NULL); - if (code) goto _err; + SRowInfo *pRowInfo = pCommitter->dReader.pRowInfo; + if (pRowInfo && pRowInfo->uid != pTbData->uid) { + pRowInfo = NULL; + } - nBlock = pCommitter->dReader.mBlock.nItem; - ASSERT(nBlock > 0); + while (nRow) { + SBlockData *pBlockData; + int8_t toData; - suid = pBlockIdx->suid; - uid = pBlockIdx->uid; - } else { - nBlock = 0; - } + if (nRow < pCommitter->minRow) { // to .last + toData = 0; + pBlockData = &pCommitter->dWriter.bDatal; - if (pRow == NULL && nBlock == 0) goto _exit; + // commit and reset block data schema if need + // QUESTION: Is there a case that pBlockData->nRow == 0 but need to change schema ? + if (pBlockData->suid || pBlockData->uid) { + if (pBlockData->suid != pTbData->suid || pBlockData->suid == 0) { + if (pBlockData->nRow > 0) { + code = tsdbCommitLastBlock(pCommitter); + if (code) goto _err; + } - // start =========== - tMapDataReset(&pCommitter->dWriter.mBlock); - SBlock block; - SBlock *pBlock = █ + tBlockDataReset(pBlockData); + } + } - iBlock = 0; - if (iBlock < nBlock) { - tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock); - } else { - pBlock = NULL; - } + // set block data schema if need + if (pBlockData->suid == 0 && pBlockData->uid == 0) { + code = tsdbCommitterUpdateTableSchema(pCommitter, pTbData->suid, pTbData->uid); + if (code) goto _err; - if (pRow) { - code = tsdbCommitterUpdateTableSchema(pCommitter, pTbData->suid, pTbData->uid, pTbData->maxSkmVer); - if (code) goto _err; - } + code = + tBlockDataInit(pBlockData, pTbData->suid, pTbData->suid ? 0 : pTbData->uid, pCommitter->skmTable.pTSchema); + if (code) goto _err; + } - // merge =========== - while (true) { - if (pRow == NULL && pBlock == NULL) break; + if (pBlockData->nRow + nRow > pCommitter->maxRow) { + code = tsdbCommitLastBlock(pCommitter); + if (code) goto _err; + } + } else { // to .data + toData = 1; + pBlockData = &pCommitter->dWriter.bData; + ASSERT(pBlockData->nRow == 0); + } + + while (pRow && pRowInfo) { + int32_t c = tsdbRowCmprFn(pRow, &pRowInfo->row); + if (c < 0) { + code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow)); + if (code) goto _err; - if (pRow && pBlock) { - if (pBlock->last) { - code = tsdbMergeTableData(pCommitter, pIter, pBlock, - (TSDBKEY){.ts = pCommitter->maxKey + 1, .version = VERSION_MIN}, 0); + code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema, pTbData->uid); if (code) goto _err; + tsdbTbDataIterNext(pIter); pRow = tsdbTbDataIterGet(pIter); - if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) pRow = NULL; - iBlock++; - if (iBlock < nBlock) { - tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock); - } else { - pBlock = NULL; + if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) { + pRow = NULL; } + } else if (c > 0) { + code = tBlockDataAppendRow(pBlockData, &pRowInfo->row, NULL, pTbData->uid); + if (code) goto _err; + + code = tsdbCommitterNextLastRow(pCommitter); + if (code) goto _err; - ASSERT(pRow == NULL && pBlock == NULL); + pRowInfo = pCommitter->dReader.pRowInfo; + if (pRowInfo && pRowInfo->uid != pTbData->uid) { + pRowInfo = NULL; + } } else { - int32_t c = tBlockCmprFn(&(SBlock){.maxKey = TSDBROW_KEY(pRow), .minKey = TSDBROW_KEY(pRow)}, pBlock); - if (c > 0) { - // only disk data - code = tsdbCommitTableDiskData(pCommitter, pBlock, pBlockIdx); - if (code) goto _err; + ASSERT(0); + } - iBlock++; - if (iBlock < nBlock) { - tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock); - } else { - pBlock = NULL; - } - } else if (c < 0) { - // only memory data - code = tsdbCommitTableMemData(pCommitter, pIter, pBlock->minKey, 1); + nRow--; + if (toData) { + if (nRow == 0 || pBlockData->nRow >= pCommitter->maxRow * 4 / 5) { + code = tsdbCommitDataBlock(pCommitter, NULL); if (code) goto _err; + goto _outer_break; + } + } + } - pRow = tsdbTbDataIterGet(pIter); - if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) pRow = NULL; - } else { - // merge memory and disk - int32_t nOvlp = tsdbGetOvlpNRow(pIter, pBlock); - ASSERT(nOvlp); - if (pBlock->nRow + nOvlp <= pCommitter->maxRow && pBlock->nSubBlock < TSDB_MAX_SUBBLOCKS) { - code = tsdbMergeAsSubBlock(pCommitter, pIter, pBlock); - if (code) goto _err; - } else { - TSDBKEY toKey = {.ts = pCommitter->maxKey + 1, .version = VERSION_MIN}; - int8_t toDataOnly = 0; + while (pRow) { + code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow)); + if (code) goto _err; + + code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema, pTbData->uid); + if (code) goto _err; + + tsdbTbDataIterNext(pIter); + pRow = tsdbTbDataIterGet(pIter); + if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) { + pRow = NULL; + } - if (iBlock < nBlock - 1) { - toDataOnly = 1; + nRow--; + if (toData) { + if (nRow == 0 || pBlockData->nRow >= pCommitter->maxRow * 4 / 5) { + code = tsdbCommitDataBlock(pCommitter, NULL); + if (code) goto _err; + goto _outer_break; + } + } + } - SBlock nextBlock = {0}; - tBlockReset(&nextBlock); - tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock + 1, &nextBlock, tGetBlock); - toKey = nextBlock.minKey; - } + while (pRowInfo) { + code = tBlockDataAppendRow(pBlockData, &pRowInfo->row, NULL, pTbData->uid); + if (code) goto _err; - code = tsdbMergeTableData(pCommitter, pIter, pBlock, toKey, toDataOnly); - if (code) goto _err; - } + code = tsdbCommitterNextLastRow(pCommitter); + if (code) goto _err; - pRow = tsdbTbDataIterGet(pIter); - if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) pRow = NULL; - iBlock++; - if (iBlock < nBlock) { - tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock); - } else { - pBlock = NULL; - } + pRowInfo = pCommitter->dReader.pRowInfo; + if (pRowInfo && pRowInfo->uid != pTbData->uid) { + pRowInfo = NULL; + } + + nRow--; + if (toData) { + if (nRow == 0 || pBlockData->nRow >= pCommitter->maxRow * 4 / 5) { + code = tsdbCommitDataBlock(pCommitter, NULL); + if (code) goto _err; + goto _outer_break; } } - } else if (pBlock) { - code = tsdbCommitTableDiskData(pCommitter, pBlock, pBlockIdx); + } + + _outer_break: + ASSERT(nRow >= 0); + } + +_exit: + return code; + +_err: + tsdbError("vgId:%d tsdb merge commit last failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code)); + return code; +} + +static int32_t tsdbCommitTableData(SCommitter *pCommitter, STbData *pTbData) { + int32_t code = 0; + + ASSERT(pCommitter->dReader.pBlockIdx == NULL || tTABLEIDCmprFn(pCommitter->dReader.pBlockIdx, pTbData) >= 0); + ASSERT(pCommitter->dReader.pRowInfo == NULL || tTABLEIDCmprFn(pCommitter->dReader.pRowInfo, pTbData) >= 0); + + // merge commit table data + STbDataIter iter = {0}; + STbDataIter *pIter = &iter; + TSDBROW *pRow; + + tsdbTbDataIterOpen(pTbData, &(TSDBKEY){.ts = pCommitter->minKey, .version = VERSION_MIN}, 0, pIter); + pRow = tsdbTbDataIterGet(pIter); + if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) { + pRow = NULL; + } + + if (pRow == NULL) { + if (pCommitter->dReader.pBlockIdx && tTABLEIDCmprFn(pCommitter->dReader.pBlockIdx, pTbData) == 0) { + SBlockIdx blockIdx = {.suid = pTbData->suid, .uid = pTbData->uid}; + code = tsdbWriteBlock(pCommitter->dWriter.pWriter, &pCommitter->dReader.mBlock, &blockIdx); + if (code) goto _err; + + if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + } + + goto _exit; + } + + int32_t iBlock = 0; + SBlock block; + SBlock *pBlock = █ + if (pCommitter->dReader.pBlockIdx && tTABLEIDCmprFn(pTbData, pCommitter->dReader.pBlockIdx) == 0) { + tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock); + } else { + pBlock = NULL; + } + + code = tsdbCommitterUpdateTableSchema(pCommitter, pTbData->suid, pTbData->uid); + if (code) goto _err; + + tMapDataReset(&pCommitter->dWriter.mBlock); + code = tBlockDataInit(&pCommitter->dReader.bData, pTbData->suid, pTbData->uid, pCommitter->skmTable.pTSchema); + if (code) goto _err; + code = tBlockDataInit(&pCommitter->dWriter.bData, pTbData->suid, pTbData->uid, pCommitter->skmTable.pTSchema); + if (code) goto _err; + + // .data merge + while (pBlock && pRow) { + int32_t c = tBlockCmprFn(pBlock, &(SBlock){.minKey = TSDBROW_KEY(pRow), .maxKey = TSDBROW_KEY(pRow)}); + if (c < 0) { // disk + code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pBlock, tPutBlock); if (code) goto _err; + // next iBlock++; - if (iBlock < nBlock) { + if (iBlock < pCommitter->dReader.mBlock.nItem) { tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock); } else { pBlock = NULL; } - } else { - code = - tsdbCommitTableMemData(pCommitter, pIter, (TSDBKEY){.ts = pCommitter->maxKey + 1, .version = VERSION_MIN}, 0); + } else if (c > 0) { // memory + code = tsdbCommitTableMemData(pCommitter, pIter, pBlock->minKey); if (code) goto _err; + // next + pRow = tsdbTbDataIterGet(pIter); + if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) { + pRow = NULL; + } + } else { // merge + int32_t nOvlp = tsdbGetNumOfRowsLessThan(pIter, pBlock->maxKey); + + ASSERT(nOvlp > 0); + + if (pBlock->nRow + nOvlp <= pCommitter->maxRow && pBlock->nSubBlock < TSDB_MAX_SUBBLOCKS) { + code = tsdbMergeAsSubBlock(pCommitter, pIter, pBlock); + if (code) goto _err; + } else { + code = tsdbMergeCommitData(pCommitter, pIter, pBlock); + if (code) goto _err; + } + + // next pRow = tsdbTbDataIterGet(pIter); - if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) pRow = NULL; - ASSERT(pRow == NULL); + if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) { + pRow = NULL; + } + iBlock++; + if (iBlock < pCommitter->dReader.mBlock.nItem) { + tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock); + } else { + pBlock = NULL; + } } } - // end ===================== - code = tsdbCommitTableDataEnd(pCommitter, suid, uid); + while (pBlock) { + code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pBlock, tPutBlock); + if (code) goto _err; + + // next + iBlock++; + if (iBlock < pCommitter->dReader.mBlock.nItem) { + tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock); + } else { + pBlock = NULL; + } + } + + // .data append and .last merge + code = tsdbMergeCommitLast(pCommitter, pIter); if (code) goto _err; + // end + if (pCommitter->dWriter.mBlock.nItem > 0) { + SBlockIdx blockIdx = {.suid = pTbData->suid, .uid = pTbData->uid}; + code = tsdbWriteBlock(pCommitter->dWriter.pWriter, &pCommitter->dWriter.mBlock, &blockIdx); + if (code) goto _err; + + if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + } + _exit: - if (pIter) { - pRow = tsdbTbDataIterGet(pIter); - if (pRow) pCommitter->nextKey = TMIN(pCommitter->nextKey, TSDBROW_TS(pRow)); + pRow = tsdbTbDataIterGet(pIter); + if (pRow) { + pCommitter->nextKey = TMIN(pCommitter->nextKey, TSDBROW_TS(pRow)); } + return code; _err: - tsdbError("vgId:%d, tsdb commit table data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code)); + tsdbError("vgId:%d tsdb commit table data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code)); return code; } static int32_t tsdbCommitFileDataEnd(SCommitter *pCommitter) { int32_t code = 0; - // write blockIdx - code = tsdbWriteBlockIdx(pCommitter->dWriter.pWriter, pCommitter->dWriter.aBlockIdx, NULL); + // write aBlockIdx + code = tsdbWriteBlockIdx(pCommitter->dWriter.pWriter, pCommitter->dWriter.aBlockIdx); + if (code) goto _err; + + // write aBlockL + code = tsdbWriteBlockL(pCommitter->dWriter.pWriter, pCommitter->dWriter.aBlockL); if (code) goto _err; // update file header @@ -890,6 +1127,98 @@ _err: return code; } +static int32_t tsdbMoveCommitData(SCommitter *pCommitter, TABLEID toTable) { + int32_t code = 0; + + // .data + while (true) { + if (pCommitter->dReader.pBlockIdx == NULL || tTABLEIDCmprFn(pCommitter->dReader.pBlockIdx, &toTable) >= 0) break; + + SBlockIdx blockIdx = *pCommitter->dReader.pBlockIdx; + code = tsdbWriteBlock(pCommitter->dWriter.pWriter, &pCommitter->dReader.mBlock, &blockIdx); + if (code) goto _err; + + if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + + code = tsdbCommitterNextTableData(pCommitter); + if (code) goto _err; + } + + // .last + while (true) { + if (pCommitter->dReader.pRowInfo == NULL || tTABLEIDCmprFn(pCommitter->dReader.pRowInfo, &toTable) >= 0) break; + + SBlockData *pBlockDataR = &pCommitter->dReader.bDatal; + SBlockData *pBlockDataW = &pCommitter->dWriter.bDatal; + tb_uid_t suid = pCommitter->dReader.pRowInfo->suid; + tb_uid_t uid = pCommitter->dReader.pRowInfo->uid; + + ASSERT((pBlockDataR->suid && !pBlockDataR->uid) || (!pBlockDataR->suid && pBlockDataR->uid)); + ASSERT(pBlockDataR->nRow > 0); + + // commit and reset block data schema if need + if (pBlockDataW->suid || pBlockDataW->uid) { + if (pBlockDataW->suid != suid || pBlockDataW->suid == 0) { + if (pBlockDataW->nRow > 0) { + code = tsdbCommitLastBlock(pCommitter); + if (code) goto _err; + } + tBlockDataReset(pBlockDataW); + } + } + + // set block data schema if need + if (pBlockDataW->suid == 0 && pBlockDataW->uid == 0) { + code = tsdbCommitterUpdateTableSchema(pCommitter, suid, uid); + if (code) goto _err; + + code = tBlockDataInit(pBlockDataW, suid, suid ? 0 : uid, pCommitter->skmTable.pTSchema); + if (code) goto _err; + } + + // check if it can make sure that one table data in one block + int32_t nRow = 0; + if (pBlockDataR->suid) { + int32_t iRow = pCommitter->dReader.iRow; + while ((iRow < pBlockDataR->nRow) && (pBlockDataR->aUid[iRow] == uid)) { + nRow++; + iRow++; + } + } else { + ASSERT(pCommitter->dReader.iRow == 0); + nRow = pBlockDataR->nRow; + } + + ASSERT(nRow > 0 && nRow < pCommitter->minRow); + + if (pBlockDataW->nRow + nRow > pCommitter->maxRow) { + ASSERT(pBlockDataW->nRow > 0); + + code = tsdbCommitLastBlock(pCommitter); + if (code) goto _err; + } + + while (nRow > 0) { + code = tBlockDataAppendRow(pBlockDataW, &pCommitter->dReader.pRowInfo->row, NULL, uid); + if (code) goto _err; + + code = tsdbCommitterNextLastRow(pCommitter); + if (code) goto _err; + + nRow--; + } + } + + return code; + +_err: + tsdbError("vgId:%d tsdb move commit data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code)); + return code; +} + static int32_t tsdbCommitFileData(SCommitter *pCommitter) { int32_t code = 0; STsdb *pTsdb = pCommitter->pTsdb; @@ -900,59 +1229,30 @@ static int32_t tsdbCommitFileData(SCommitter *pCommitter) { if (code) goto _err; // commit file data impl - int32_t iTbData = 0; - int32_t nTbData = taosArrayGetSize(pMemTable->aTbData); - int32_t iBlockIdx = 0; - int32_t nBlockIdx = taosArrayGetSize(pCommitter->dReader.aBlockIdx); - STbData *pTbData; - SBlockIdx *pBlockIdx; + for (int32_t iTbData = 0; iTbData < taosArrayGetSize(pCommitter->aTbDataP); iTbData++) { + STbData *pTbData = (STbData *)taosArrayGetP(pCommitter->aTbDataP, iTbData); - ASSERT(nTbData > 0); - - pTbData = (STbData *)taosArrayGetP(pMemTable->aTbData, iTbData); - pBlockIdx = (iBlockIdx < nBlockIdx) ? (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, iBlockIdx) : NULL; - while (pTbData || pBlockIdx) { - if (pTbData && pBlockIdx) { - int32_t c = tTABLEIDCmprFn(pTbData, pBlockIdx); - - if (c == 0) { - goto _commit_table_mem_and_disk; - } else if (c < 0) { - goto _commit_table_mem_data; - } else { - goto _commit_table_disk_data; - } - } else if (pBlockIdx) { - goto _commit_table_disk_data; - } else { - goto _commit_table_mem_data; - } - - _commit_table_mem_data: - code = tsdbCommitTableData(pCommitter, pTbData, NULL); + // move commit until current (suid, uid) + code = tsdbMoveCommitData(pCommitter, *(TABLEID *)pTbData); if (code) goto _err; - iTbData++; - pTbData = (iTbData < nTbData) ? (STbData *)taosArrayGetP(pMemTable->aTbData, iTbData) : NULL; - continue; - - _commit_table_disk_data: - code = tsdbCommitTableData(pCommitter, NULL, pBlockIdx); + // commit current table data + code = tsdbCommitTableData(pCommitter, pTbData); if (code) goto _err; - iBlockIdx++; - pBlockIdx = (iBlockIdx < nBlockIdx) ? (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, iBlockIdx) : NULL; - continue; + // move next reader table data if need + if (pCommitter->dReader.pBlockIdx && tTABLEIDCmprFn(pTbData, pCommitter->dReader.pBlockIdx) == 0) { + code = tsdbCommitterNextTableData(pCommitter); + if (code) goto _err; + } + } - _commit_table_mem_and_disk: - code = tsdbCommitTableData(pCommitter, pTbData, pBlockIdx); - if (code) goto _err; + code = tsdbMoveCommitData(pCommitter, (TABLEID){.suid = INT64_MAX, .uid = INT64_MAX}); + if (code) goto _err; - iBlockIdx++; - pBlockIdx = (iBlockIdx < nBlockIdx) ? (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, iBlockIdx) : NULL; - iTbData++; - pTbData = (iTbData < nTbData) ? (STbData *)taosArrayGetP(pMemTable->aTbData, iTbData) : NULL; - continue; + if (pCommitter->dWriter.bDatal.nRow > 0) { + code = tsdbCommitLastBlock(pCommitter); + if (code) goto _err; } // commit file data end @@ -987,6 +1287,11 @@ static int32_t tsdbStartCommit(STsdb *pTsdb, SCommitter *pCommitter) { pCommitter->minRow = pTsdb->pVnode->config.tsdbCfg.minRows; pCommitter->maxRow = pTsdb->pVnode->config.tsdbCfg.maxRows; pCommitter->cmprAlg = pTsdb->pVnode->config.tsdbCfg.compression; + pCommitter->aTbDataP = tsdbMemTableGetTbDataArray(pTsdb->imem); + if (pCommitter->aTbDataP == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } code = tsdbFSCopy(pTsdb, &pCommitter->fs); if (code) goto _err; @@ -1001,22 +1306,42 @@ _err: static int32_t tsdbCommitDataStart(SCommitter *pCommitter) { int32_t code = 0; + // Reader pCommitter->dReader.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx)); if (pCommitter->dReader.aBlockIdx == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; } + code = tBlockDataCreate(&pCommitter->dReader.bData); + if (code) goto _exit; + + pCommitter->dReader.aBlockL = taosArrayInit(0, sizeof(SBlockL)); + if (pCommitter->dReader.aBlockL == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + + code = tBlockDataCreate(&pCommitter->dReader.bDatal); + if (code) goto _exit; + + // Writer pCommitter->dWriter.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx)); if (pCommitter->dWriter.aBlockIdx == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; } - code = tBlockDataInit(&pCommitter->dReader.bData); + pCommitter->dWriter.aBlockL = taosArrayInit(0, sizeof(SBlockL)); + if (pCommitter->dWriter.aBlockL == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + + code = tBlockDataCreate(&pCommitter->dWriter.bData); if (code) goto _exit; - code = tBlockDataInit(&pCommitter->dWriter.bData); + code = tBlockDataCreate(&pCommitter->dWriter.bDatal); if (code) goto _exit; _exit: @@ -1024,12 +1349,19 @@ _exit: } static void tsdbCommitDataEnd(SCommitter *pCommitter) { + // Reader taosArrayDestroy(pCommitter->dReader.aBlockIdx); tMapDataClear(&pCommitter->dReader.mBlock); - tBlockDataClear(&pCommitter->dReader.bData, 1); + tBlockDataDestroy(&pCommitter->dReader.bData, 1); + taosArrayDestroy(pCommitter->dReader.aBlockL); + tBlockDataDestroy(&pCommitter->dReader.bDatal, 1); + + // Writer taosArrayDestroy(pCommitter->dWriter.aBlockIdx); + taosArrayDestroy(pCommitter->dWriter.aBlockL); tMapDataClear(&pCommitter->dWriter.mBlock); - tBlockDataClear(&pCommitter->dWriter.bData, 1); + tBlockDataDestroy(&pCommitter->dWriter.bData, 1); + tBlockDataDestroy(&pCommitter->dWriter.bDatal, 1); tTSchemaDestroy(pCommitter->skmTable.pTSchema); tTSchemaDestroy(pCommitter->skmRow.pTSchema); } @@ -1049,9 +1381,6 @@ static int32_t tsdbCommitData(SCommitter *pCommitter) { // impl ==================== pCommitter->nextKey = pMemTable->minKey; while (pCommitter->nextKey < TSKEY_MAX) { - pCommitter->commitFid = tsdbKeyFid(pCommitter->nextKey, pCommitter->minutes, pCommitter->precision); - tsdbFidKeyRange(pCommitter->commitFid, pCommitter->minutes, pCommitter->precision, &pCommitter->minKey, - &pCommitter->maxKey); code = tsdbCommitFileData(pCommitter); if (code) goto _err; } @@ -1088,13 +1417,13 @@ static int32_t tsdbCommitDel(SCommitter *pCommitter) { int32_t iDelIdx = 0; int32_t nDelIdx = taosArrayGetSize(pCommitter->aDelIdx); int32_t iTbData = 0; - int32_t nTbData = taosArrayGetSize(pMemTable->aTbData); + int32_t nTbData = taosArrayGetSize(pCommitter->aTbDataP); STbData *pTbData; SDelIdx *pDelIdx; ASSERT(nTbData > 0); - pTbData = (STbData *)taosArrayGetP(pMemTable->aTbData, iTbData); + pTbData = (STbData *)taosArrayGetP(pCommitter->aTbDataP, iTbData); pDelIdx = (iDelIdx < nDelIdx) ? (SDelIdx *)taosArrayGet(pCommitter->aDelIdx, iDelIdx) : NULL; while (true) { if (pTbData == NULL && pDelIdx == NULL) break; @@ -1120,7 +1449,7 @@ static int32_t tsdbCommitDel(SCommitter *pCommitter) { if (code) goto _err; iTbData++; - pTbData = (iTbData < nTbData) ? (STbData *)taosArrayGetP(pMemTable->aTbData, iTbData) : NULL; + pTbData = (iTbData < nTbData) ? (STbData *)taosArrayGetP(pCommitter->aTbDataP, iTbData) : NULL; continue; _commit_disk_del: @@ -1136,7 +1465,7 @@ static int32_t tsdbCommitDel(SCommitter *pCommitter) { if (code) goto _err; iTbData++; - pTbData = (iTbData < nTbData) ? (STbData *)taosArrayGetP(pMemTable->aTbData, iTbData) : NULL; + pTbData = (iTbData < nTbData) ? (STbData *)taosArrayGetP(pCommitter->aTbDataP, iTbData) : NULL; iDelIdx++; pDelIdx = (iDelIdx < nDelIdx) ? (SDelIdx *)taosArrayGet(pCommitter->aDelIdx, iDelIdx) : NULL; continue; @@ -1184,6 +1513,7 @@ static int32_t tsdbEndCommit(SCommitter *pCommitter, int32_t eno) { tsdbUnrefMemTable(pMemTable); tsdbFSDestroy(&pCommitter->fs); + taosArrayDestroy(pCommitter->aTbDataP); tsdbInfo("vgId:%d, tsdb end commit", TD_VID(pTsdb->pVnode)); return code; diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c index 74f1aef1fc7acc699b8dbc23521d957a2865ba3a..247de993381d98713fa6a4ca1938c11b044c8cd6 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS.c @@ -576,10 +576,7 @@ int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) { code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; } - fSet.pHeadF->nRef = 0; - fSet.pHeadF->commitID = pSet->pHeadF->commitID; - fSet.pHeadF->size = pSet->pHeadF->size; - fSet.pHeadF->offset = pSet->pHeadF->offset; + *fSet.pHeadF = *pSet->pHeadF; // data fSet.pDataF = (SDataFile *)taosMemoryMalloc(sizeof(SDataFile)); @@ -587,9 +584,7 @@ int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) { code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; } - fSet.pDataF->nRef = 0; - fSet.pDataF->commitID = pSet->pDataF->commitID; - fSet.pDataF->size = pSet->pDataF->size; + *fSet.pDataF = *pSet->pDataF; // data fSet.pLastF = (SLastFile *)taosMemoryMalloc(sizeof(SLastFile)); @@ -597,9 +592,7 @@ int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) { code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; } - fSet.pLastF->nRef = 0; - fSet.pLastF->commitID = pSet->pLastF->commitID; - fSet.pLastF->size = pSet->pLastF->size; + *fSet.pLastF = *pSet->pLastF; // last fSet.pSmaF = (SSmaFile *)taosMemoryMalloc(sizeof(SSmaFile)); @@ -607,9 +600,7 @@ int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) { code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; } - fSet.pSmaF->nRef = 0; - fSet.pSmaF->commitID = pSet->pSmaF->commitID; - fSet.pSmaF->size = pSet->pSmaF->size; + *fSet.pSmaF = *pSet->pSmaF; if (taosArrayPush(pFS->aDFileSet, &fSet) == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; diff --git a/source/dnode/vnode/src/tsdb/tsdbFile.c b/source/dnode/vnode/src/tsdb/tsdbFile.c index 52a102f911290dc7a40516d594fc378ff2942cf0..00d2ac848f6d599fef54d9957047521e27062c89 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFile.c +++ b/source/dnode/vnode/src/tsdb/tsdbFile.c @@ -58,6 +58,7 @@ int32_t tPutLastFile(uint8_t *p, SLastFile *pLastFile) { n += tPutI64v(p ? p + n : p, pLastFile->commitID); n += tPutI64v(p ? p + n : p, pLastFile->size); + n += tPutI64v(p ? p + n : p, pLastFile->offset); return n; } @@ -67,6 +68,7 @@ static int32_t tGetLastFile(uint8_t *p, SLastFile *pLastFile) { n += tGetI64v(p + n, &pLastFile->commitID); n += tGetI64v(p + n, &pLastFile->size); + n += tGetI64v(p + n, &pLastFile->offset); return n; } @@ -186,11 +188,16 @@ int32_t tPutDFileSet(uint8_t *p, SDFileSet *pSet) { n += tPutI32v(p ? p + n : p, pSet->diskId.level); n += tPutI32v(p ? p + n : p, pSet->diskId.id); n += tPutI32v(p ? p + n : p, pSet->fid); + + // data n += tPutHeadFile(p ? p + n : p, pSet->pHeadF); n += tPutDataFile(p ? p + n : p, pSet->pDataF); - n += tPutLastFile(p ? p + n : p, pSet->pLastF); n += tPutSmaFile(p ? p + n : p, pSet->pSmaF); + // last + n += tPutU8(p ? p + n : p, 1); // for future compatibility + n += tPutLastFile(p ? p + n : p, pSet->pLastF); + return n; } @@ -200,11 +207,17 @@ int32_t tGetDFileSet(uint8_t *p, SDFileSet *pSet) { n += tGetI32v(p + n, &pSet->diskId.level); n += tGetI32v(p + n, &pSet->diskId.id); n += tGetI32v(p + n, &pSet->fid); + + // data n += tGetHeadFile(p + n, pSet->pHeadF); n += tGetDataFile(p + n, pSet->pDataF); - n += tGetLastFile(p + n, pSet->pLastF); n += tGetSmaFile(p + n, pSet->pSmaF); + // last + uint8_t nLast; + n += tGetU8(p + n, &nLast); + n += tGetLastFile(p + n, pSet->pLastF); + return n; } diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable.c b/source/dnode/vnode/src/tsdb/tsdbMemTable.c index 8ae0e824cf34600d5d6d5ccd62345e5713f7d80b..a6628463f8ff231505052dc3a48d8c7a59a6eaa5 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMemTable.c +++ b/source/dnode/vnode/src/tsdb/tsdbMemTable.c @@ -15,6 +15,7 @@ #include "tsdb.h" +#define MEM_MIN_HASH 1024 #define SL_MAX_LEVEL 5 #define SL_NODE_SIZE(l) (sizeof(SMemSkipListNode) + sizeof(SMemSkipListNode *) * (l)*2) @@ -45,12 +46,12 @@ int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable) { pMemTable->nRef = 1; pMemTable->minKey = TSKEY_MAX; pMemTable->maxKey = TSKEY_MIN; - pMemTable->minVersion = VERSION_MAX; - pMemTable->maxVersion = VERSION_MIN; pMemTable->nRow = 0; pMemTable->nDel = 0; - pMemTable->aTbData = taosArrayInit(128, sizeof(STbData *)); - if (pMemTable->aTbData == NULL) { + pMemTable->nTbData = 0; + pMemTable->nBucket = MEM_MIN_HASH; + pMemTable->aBucket = (STbData **)taosMemoryCalloc(pMemTable->nBucket, sizeof(STbData *)); + if (pMemTable->aBucket == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; taosMemoryFree(pMemTable); goto _err; @@ -68,37 +69,30 @@ _err: void tsdbMemTableDestroy(SMemTable *pMemTable) { if (pMemTable) { vnodeBufPoolUnRef(pMemTable->pPool); - taosArrayDestroy(pMemTable->aTbData); + taosMemoryFree(pMemTable->aBucket); taosMemoryFree(pMemTable); } } -static int32_t tbDataPCmprFn(const void *p1, const void *p2) { - STbData *pTbData1 = *(STbData **)p1; - STbData *pTbData2 = *(STbData **)p2; +static FORCE_INLINE STbData *tsdbGetTbDataFromMemTableImpl(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid) { + STbData *pTbData = pMemTable->aBucket[TABS(uid) % pMemTable->nBucket]; - if (pTbData1->suid < pTbData2->suid) { - return -1; - } else if (pTbData1->suid > pTbData2->suid) { - return 1; - } - - if (pTbData1->uid < pTbData2->uid) { - return -1; - } else if (pTbData1->uid > pTbData2->uid) { - return 1; + while (pTbData) { + if (pTbData->uid == uid) break; + pTbData = pTbData->next; } - return 0; + return pTbData; } -void tsdbGetTbDataFromMemTable(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid, STbData **ppTbData) { - STbData *pTbData = &(STbData){.suid = suid, .uid = uid}; + +STbData *tsdbGetTbDataFromMemTable(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid) { + STbData *pTbData; taosRLockLatch(&pMemTable->latch); - void *p = taosArraySearch(pMemTable->aTbData, &pTbData, tbDataPCmprFn, TD_EQ); + pTbData = tsdbGetTbDataFromMemTableImpl(pMemTable, suid, uid); taosRUnLockLatch(&pMemTable->latch); - *ppTbData = p ? *(STbData **)p : NULL; + return pTbData; } int32_t tsdbInsertTableData(STsdb *pTsdb, int64_t version, SSubmitMsgIter *pMsgIter, SSubmitBlk *pBlock, @@ -108,29 +102,21 @@ int32_t tsdbInsertTableData(STsdb *pTsdb, int64_t version, SSubmitMsgIter *pMsgI STbData *pTbData = NULL; tb_uid_t suid = pMsgIter->suid; tb_uid_t uid = pMsgIter->uid; - int32_t sverNew; - - // check if table exists (todo: refact) - SMetaReader mr = {0}; - // SMetaEntry me = {0}; - metaReaderInit(&mr, pTsdb->pVnode->pMeta, 0); - if (metaGetTableEntryByUid(&mr, pMsgIter->uid) < 0) { - metaReaderClear(&mr); - code = TSDB_CODE_PAR_TABLE_NOT_EXIST; + + SMetaInfo info; + code = metaGetInfo(pTsdb->pVnode->pMeta, uid, &info); + if (code) { + code = TSDB_CODE_TDB_TABLE_NOT_EXIST; goto _err; } - if (pRsp->tblFName) strcat(pRsp->tblFName, mr.me.name); - - if (mr.me.type == TSDB_NORMAL_TABLE) { - sverNew = mr.me.ntbEntry.schemaRow.version; - } else { - tDecoderClear(&mr.coder); - - metaGetTableEntryByUid(&mr, mr.me.ctbEntry.suid); - sverNew = mr.me.stbEntry.schemaRow.version; + if (info.suid != suid) { + code = TSDB_CODE_INVALID_MSG; + goto _err; } - metaReaderClear(&mr); - pRsp->sver = sverNew; + if (info.suid) { + metaGetInfo(pTsdb->pVnode->pMeta, info.suid, &info); + } + pRsp->sver = info.skmVer; // create/get STbData to op code = tsdbGetOrCreateTbData(pMemTable, suid, uid, &pTbData); @@ -157,7 +143,17 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid SVBufPool *pPool = pTsdb->pVnode->inUse; TSDBKEY lastKey = {.version = version, .ts = eKey}; - // check if table exists (todo) + // check if table exists + SMetaInfo info; + code = metaGetInfo(pTsdb->pVnode->pMeta, uid, &info); + if (code) { + code = TSDB_CODE_TDB_TABLE_NOT_EXIST; + goto _err; + } + if (info.suid != suid) { + code = TSDB_CODE_INVALID_MSG; + goto _err; + } code = tsdbGetOrCreateTbData(pMemTable, suid, uid, &pTbData); if (code) { @@ -182,10 +178,6 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid pTbData->pTail = pDelData; } - // update the state of pMemTable and other (todo) - - pMemTable->minVersion = TMIN(pMemTable->minVersion, version); - pMemTable->maxVersion = TMAX(pMemTable->maxVersion, version); pMemTable->nDel++; if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config) && tsdbKeyCmprFn(&lastKey, &pTbData->maxKey) >= 0) { @@ -196,9 +188,9 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid tsdbCacheDeleteLast(pTsdb->lruCache, pTbData->uid, eKey); } - tsdbError("vgId:%d, delete data from table suid:%" PRId64 " uid:%" PRId64 " skey:%" PRId64 " eKey:%" PRId64 - " since %s", - TD_VID(pTsdb->pVnode), suid, uid, sKey, eKey, tstrerror(code)); + tsdbInfo("vgId:%d, delete data from table suid:%" PRId64 " uid:%" PRId64 " skey:%" PRId64 " eKey:%" PRId64 + " since %s", + TD_VID(pTsdb->pVnode), suid, uid, sKey, eKey, tstrerror(code)); return code; _err: @@ -318,18 +310,44 @@ _exit: return pIter->pRow; } +static int32_t tsdbMemTableRehash(SMemTable *pMemTable) { + int32_t code = 0; + + int32_t nBucket = pMemTable->nBucket * 2; + STbData **aBucket = (STbData **)taosMemoryCalloc(nBucket, sizeof(STbData *)); + if (aBucket == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + + for (int32_t iBucket = 0; iBucket < pMemTable->nBucket; iBucket++) { + STbData *pTbData = pMemTable->aBucket[iBucket]; + + while (pTbData) { + STbData *pNext = pTbData->next; + + int32_t idx = TABS(pTbData->uid) % nBucket; + pTbData->next = aBucket[idx]; + aBucket[idx] = pTbData; + + pTbData = pNext; + } + } + + taosMemoryFree(pMemTable->aBucket); + pMemTable->nBucket = nBucket; + pMemTable->aBucket = aBucket; + +_exit: + return code; +} + static int32_t tsdbGetOrCreateTbData(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid, STbData **ppTbData) { - int32_t code = 0; - int32_t idx = 0; - STbData *pTbData = NULL; - STbData *pTbDataT = &(STbData){.suid = suid, .uid = uid}; + int32_t code = 0; // get - idx = taosArraySearchIdx(pMemTable->aTbData, &pTbDataT, tbDataPCmprFn, TD_GE); - if (idx >= 0) { - pTbData = (STbData *)taosArrayGetP(pMemTable->aTbData, idx); - if (tbDataPCmprFn(&pTbDataT, &pTbData) == 0) goto _exit; - } + STbData *pTbData = tsdbGetTbDataFromMemTableImpl(pMemTable, suid, uid); + if (pTbData) goto _exit; // create SVBufPool *pPool = pMemTable->pTsdb->pVnode->inUse; @@ -344,9 +362,6 @@ static int32_t tsdbGetOrCreateTbData(SMemTable *pMemTable, tb_uid_t suid, tb_uid pTbData->uid = uid; pTbData->minKey = TSKEY_MAX; pTbData->maxKey = TSKEY_MIN; - pTbData->minVersion = VERSION_MAX; - pTbData->maxVersion = VERSION_MIN; - pTbData->maxSkmVer = -1; pTbData->pHead = NULL; pTbData->pTail = NULL; pTbData->sl.seed = taosRand(); @@ -365,21 +380,23 @@ static int32_t tsdbGetOrCreateTbData(SMemTable *pMemTable, tb_uid_t suid, tb_uid SL_NODE_FORWARD(pTbData->sl.pTail, iLevel) = NULL; } - void *p; - if (idx < 0) { - idx = taosArrayGetSize(pMemTable->aTbData); + taosWLockLatch(&pMemTable->latch); + + if (pMemTable->nTbData >= pMemTable->nBucket) { + code = tsdbMemTableRehash(pMemTable); + if (code) { + taosWUnLockLatch(&pMemTable->latch); + goto _err; + } } - taosWLockLatch(&pMemTable->latch); - p = taosArrayInsert(pMemTable->aTbData, idx, &pTbData); - taosWUnLockLatch(&pMemTable->latch); + int32_t idx = TABS(uid) % pMemTable->nBucket; + pTbData->next = pMemTable->aBucket[idx]; + pMemTable->aBucket[idx] = pTbData; + pMemTable->nTbData++; - tsdbDebug("vgId:%d, add table data %p at idx:%d", TD_VID(pMemTable->pTsdb->pVnode), pTbData, idx); + taosWUnLockLatch(&pMemTable->latch); - if (p == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } _exit: *ppTbData = pTbData; return code; @@ -589,15 +606,9 @@ static int32_t tsdbInsertTableDataImpl(SMemTable *pMemTable, STbData *pTbData, i tsdbCacheInsertLast(pMemTable->pTsdb->lruCache, pTbData->uid, pLastRow, pMemTable->pTsdb); } - pTbData->minVersion = TMIN(pTbData->minVersion, version); - pTbData->maxVersion = TMAX(pTbData->maxVersion, version); - pTbData->maxSkmVer = TMAX(pTbData->maxSkmVer, pMsgIter->sversion); - // SMemTable pMemTable->minKey = TMIN(pMemTable->minKey, pTbData->minKey); pMemTable->maxKey = TMAX(pMemTable->maxKey, pTbData->maxKey); - pMemTable->minVersion = TMIN(pMemTable->minVersion, pTbData->minVersion); - pMemTable->maxVersion = TMAX(pMemTable->maxVersion, pTbData->maxVersion); pMemTable->nRow += nRow; pRsp->numOfRows = nRow; @@ -622,3 +633,41 @@ void tsdbUnrefMemTable(SMemTable *pMemTable) { tsdbMemTableDestroy(pMemTable); } } + +static FORCE_INLINE int32_t tbDataPCmprFn(const void *p1, const void *p2) { + STbData *pTbData1 = *(STbData **)p1; + STbData *pTbData2 = *(STbData **)p2; + + if (pTbData1->suid < pTbData2->suid) { + return -1; + } else if (pTbData1->suid > pTbData2->suid) { + return 1; + } + + if (pTbData1->uid < pTbData2->uid) { + return -1; + } else if (pTbData1->uid > pTbData2->uid) { + return 1; + } + + return 0; +} + +SArray *tsdbMemTableGetTbDataArray(SMemTable *pMemTable) { + SArray *aTbDataP = taosArrayInit(pMemTable->nTbData, sizeof(STbData *)); + if (aTbDataP == NULL) goto _exit; + + for (int32_t iBucket = 0; iBucket < pMemTable->nBucket; iBucket++) { + STbData *pTbData = pMemTable->aBucket[iBucket]; + + while (pTbData) { + taosArrayPush(aTbDataP, &pTbData); + pTbData = pTbData->next; + } + } + + taosArraySort(aTbDataP, tbDataPCmprFn); + +_exit: + return aTbDataP; +} diff --git a/source/dnode/vnode/src/tsdb/tsdbOpen.c b/source/dnode/vnode/src/tsdb/tsdbOpen.c index be2828d1879e8f898649839b4925877a73eae4b6..ec760e3c57c277ed3183bb73729f7a655daa0304 100644 --- a/source/dnode/vnode/src/tsdb/tsdbOpen.c +++ b/source/dnode/vnode/src/tsdb/tsdbOpen.c @@ -86,7 +86,7 @@ int tsdbClose(STsdb **pTsdb) { if (*pTsdb) { taosThreadRwlockDestroy(&(*pTsdb)->rwLock); tsdbFSClose(*pTsdb); - tsdbCloseCache((*pTsdb)->lruCache); + tsdbCloseCache(*pTsdb); taosMemoryFreeClear(*pTsdb); } return 0; diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 0831f3d75a64edebdaa9c2b5cc58f4d8e58bfdcc..a92e8189a1fed7a23be79ded705ad852b74b3a8c 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -15,7 +15,10 @@ #include "osDef.h" #include "tsdb.h" -#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC) + +#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC) +#define ALL_ROWS_CHECKED_INDEX (INT16_MIN) +#define INITIAL_ROW_INDEX_VAL (-1) typedef enum { EXTERNAL_ROWS_PREV = 0x1, @@ -29,16 +32,23 @@ typedef struct { bool hasVal; } SIterInfo; +typedef struct { + int32_t numOfBlocks; + int32_t numOfLastBlocks; +} SBlockNumber; + typedef struct STableBlockScanInfo { uint64_t uid; TSKEY lastKey; - SMapData mapData; // block info (compressed) - SArray* pBlockList; // block data index list - SIterInfo iter; // mem buffer skip list iterator - SIterInfo iiter; // imem buffer skip list iterator - SArray* delSkyline; // delete info for this table - int32_t fileDelIndex; - bool iterInit; // whether to initialize the in-memory skip list iterator or not + SMapData mapData; // block info (compressed) + SArray* pBlockList; // block data index list + SIterInfo iter; // mem buffer skip list iterator + SIterInfo iiter; // imem buffer skip list iterator + SArray* delSkyline; // delete info for this table + int32_t fileDelIndex; // file block delete index + int32_t lastBlockDelIndex;// delete index for last block + bool iterInit; // whether to initialize the in-memory skip list iterator or not + int16_t indexInBlockL;// row position in last block } STableBlockScanInfo; typedef struct SBlockOrderWrapper { @@ -59,8 +69,10 @@ typedef struct SIOCostSummary { double buildmemBlock; int64_t headFileLoad; double headFileLoadTime; - int64_t smaData; + int64_t smaDataLoad; double smaLoadTime; + int64_t lastBlockLoad; + double lastBlockLoadTime; } SIOCostSummary; typedef struct SBlockLoadSuppInfo { @@ -71,11 +83,28 @@ typedef struct SBlockLoadSuppInfo { char** buildBuf; // build string tmp buffer, todo remove it later after all string format being updated. } SBlockLoadSuppInfo; +typedef struct SVersionRange { + uint64_t minVer; + uint64_t maxVer; +} SVersionRange; + +typedef struct SLastBlockReader { + SArray* pBlockL; + int32_t currentBlockIndex; + SBlockData lastBlockData; + STimeWindow window; + SVersionRange verRange; + int32_t order; + uint64_t uid; + int16_t* rowIndex; // row index ptr, usually from the STableBlockScanInfo->indexInBlockL +} SLastBlockReader; + typedef struct SFilesetIter { - int32_t numOfFiles; // number of total files - int32_t index; // current accessed index in the list - SArray* pFileList; // data file list - int32_t order; + int32_t numOfFiles; // number of total files + int32_t index; // current accessed index in the list + SArray* pFileList; // data file list + int32_t order; + SLastBlockReader* pLastBlockReader; // last file block reader } SFilesetIter; typedef struct SFileDataBlockInfo { @@ -87,9 +116,9 @@ typedef struct SFileDataBlockInfo { typedef struct SDataBlockIter { int32_t numOfBlocks; int32_t index; - SArray* blockList; // SArray + SArray* blockList; // SArray int32_t order; - SBlock block; // current SBlock data + SBlock block; // current SBlock data SHashObj* pTableMap; } SDataBlockIter; @@ -100,21 +129,22 @@ typedef struct SFileBlockDumpInfo { bool allDumped; } SFileBlockDumpInfo; -typedef struct SVersionRange { - uint64_t minVer; - uint64_t maxVer; -} SVersionRange; +typedef struct SUidOrderCheckInfo { + uint64_t* tableUidList; // access table uid list in uid ascending order list + int32_t currentIndex; // index in table uid list +} SUidOrderCheckInfo; typedef struct SReaderStatus { - bool loadFromFile; // check file stage - SHashObj* pTableMap; // SHash - STableBlockScanInfo* pTableIter; // table iterator used in building in-memory buffer data blocks. + bool loadFromFile; // check file stage + bool composedDataBlock; // the returned data block is a composed block or not + SHashObj* pTableMap; // SHash + STableBlockScanInfo* pTableIter; // table iterator used in building in-memory buffer data blocks. + SUidOrderCheckInfo uidCheckInfo; // check all table in uid order SFileBlockDumpInfo fBlockDumpInfo; - SDFileSet* pCurrentFileset; // current opened file set + SDFileSet* pCurrentFileset; // current opened file set SBlockData fileBlockData; SFilesetIter fileIter; SDataBlockIter blockIter; - bool composedDataBlock; // the returned data block is a composed block or not } SReaderStatus; struct STsdbReader { @@ -142,26 +172,32 @@ struct STsdbReader { static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter); static int buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t endKey, int32_t capacity, STsdbReader* pReader); -static TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader); +static TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader); static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, STsdbReader* pReader, SRowMerger* pMerger); +static int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts, SRowMerger* pMerger); static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger, STsdbReader* pReader); static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow, uint64_t uid); -static int32_t doAppendRowFromBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData, +static int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData, int32_t rowIndex); static void setComposedBlockFlag(STsdbReader* pReader, bool composed); static bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order); -static void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow, +static void doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow, STsdbReader* pReader, bool* freeTSRow); static void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STSRow** pTSRow); +static int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key, STsdbReader* pReader); + static int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STbData* pMemTbData, STbData* piMemTbData); static STsdb* getTsdbByRetentions(SVnode* pVnode, TSKEY winSKey, SRetention* retentions, const char* idstr, int8_t* pLevel); static SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_t level); +static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader); +static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader); +static int32_t doBuildDataBlock(STsdbReader* pReader); static int32_t setColumnIdSlotList(STsdbReader* pReader, SSDataBlock* pBlock) { SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo; @@ -182,7 +218,6 @@ static int32_t setColumnIdSlotList(STsdbReader* pReader, SSDataBlock* pBlock) { if (IS_VAR_DATA_TYPE(pCol->info.type)) { pSupInfo->buildBuf[i] = taosMemoryMalloc(pCol->info.bytes); - // tsdbInfo("-------------------%d\n", pCol->info.bytes); } } @@ -199,7 +234,7 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, const STableK } for (int32_t j = 0; j < numOfTables; ++j) { - STableBlockScanInfo info = {.lastKey = 0, .uid = idList[j].uid}; + STableBlockScanInfo info = {.lastKey = 0, .uid = idList[j].uid, .indexInBlockL = INITIAL_ROW_INDEX_VAL}; if (ASCENDING_TRAVERSE(pTsdbReader->order)) { if (info.lastKey == INT64_MIN || info.lastKey < pTsdbReader->window.skey) { info.lastKey = pTsdbReader->window.skey; @@ -231,7 +266,9 @@ static void resetDataBlockScanInfo(SHashObj* pTableMap) { p->iter.iter = tsdbTbDataIterDestroy(p->iter.iter); } - p->delSkyline = taosArrayDestroy(p->delSkyline); + p->fileDelIndex = -1; + p->delSkyline = taosArrayDestroy(p->delSkyline); + p->lastBlockDelIndex = INITIAL_ROW_INDEX_VAL; } } @@ -293,15 +330,36 @@ static void limitOutputBufferSize(const SQueryTableDataCond* pCond, int32_t* cap } // init file iterator -static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, int32_t order, const char* idstr) { +static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdbReader* pReader/*int32_t order, const char* idstr*/) { size_t numOfFileset = taosArrayGetSize(aDFileSet); - pIter->index = ASCENDING_TRAVERSE(order) ? -1 : numOfFileset; - pIter->order = order; + pIter->index = ASCENDING_TRAVERSE(pReader->order) ? -1 : numOfFileset; + pIter->order = pReader->order; pIter->pFileList = aDFileSet; pIter->numOfFiles = numOfFileset; - tsdbDebug("init fileset iterator, total files:%d %s", pIter->numOfFiles, idstr); + if (pIter->pLastBlockReader == NULL) { + pIter->pLastBlockReader = taosMemoryCalloc(1, sizeof(struct SLastBlockReader)); + if (pIter->pLastBlockReader == NULL) { + int32_t code = TSDB_CODE_OUT_OF_MEMORY; + tsdbError("failed to prepare the last block iterator, code:%d %s", tstrerror(code), pReader->idStr); + return code; + } + + SLastBlockReader* pLReader = pIter->pLastBlockReader; + pLReader->pBlockL = taosArrayInit(4, sizeof(SBlockL)); + pLReader->order = pReader->order; + pLReader->window = pReader->window; + pLReader->verRange = pReader->verRange; + pLReader->currentBlockIndex = -1; + + int32_t code = tBlockDataCreate(&pLReader->lastBlockData); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + } + + tsdbDebug("init fileset iterator, total files:%d %s", pIter->numOfFiles, pReader->idStr); return TSDB_CODE_SUCCESS; } @@ -358,16 +416,15 @@ _err: return false; } -static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order, SHashObj* pTableMap) { +static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order) { pIter->order = order; pIter->index = -1; - pIter->numOfBlocks = -1; + pIter->numOfBlocks = 0; if (pIter->blockList == NULL) { pIter->blockList = taosArrayInit(4, sizeof(SFileDataBlockInfo)); } else { taosArrayClear(pIter->blockList); } - pIter->pTableMap = pTableMap; } static void cleanupDataBlockIterator(SDataBlockIter* pIter) { taosArrayDestroy(pIter->blockList); } @@ -419,7 +476,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd pReader->pTsdb = getTsdbByRetentions(pVnode, pCond->twindows.skey, pVnode->config.tsdbCfg.retentions, idstr, &level); pReader->suid = pCond->suid; pReader->order = pCond->order; - pReader->capacity = capacity; + pReader->capacity = 4096; pReader->idStr = (idstr != NULL) ? strdup(idstr) : NULL; pReader->verRange = getQueryVerRange(pVnode, pCond, level); pReader->type = pCond->type; @@ -440,7 +497,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd pSup->tsColAgg.colId = PRIMARYKEY_TIMESTAMP_COL_ID; - code = tBlockDataInit(&pReader->status.fileBlockData); + code = tBlockDataCreate(&pReader->status.fileBlockData); if (code != TSDB_CODE_SUCCESS) { terrno = code; goto _end; @@ -463,98 +520,18 @@ _end: return code; } -// void tsdbResetQueryHandleForNewTable(STsdbReader* queryHandle, SQueryTableDataCond* pCond, STableListInfo* tableList, -// int32_t tWinIdx) { -// STsdbReader* pTsdbReadHandle = queryHandle; - -// pTsdbReadHandle->order = pCond->order; -// pTsdbReadHandle->window = pCond->twindows[tWinIdx]; -// pTsdbReadHandle->type = TSDB_QUERY_TYPE_ALL; -// pTsdbReadHandle->cur.fid = -1; -// pTsdbReadHandle->cur.win = TSWINDOW_INITIALIZER; -// pTsdbReadHandle->checkFiles = true; -// pTsdbReadHandle->activeIndex = 0; // current active table index -// pTsdbReadHandle->locateStart = false; -// pTsdbReadHandle->loadExternalRow = pCond->loadExternalRows; - -// if (ASCENDING_TRAVERSE(pCond->order)) { -// assert(pTsdbReadHandle->window.skey <= pTsdbReadHandle->window.ekey); -// } else { -// assert(pTsdbReadHandle->window.skey >= pTsdbReadHandle->window.ekey); -// } - -// // allocate buffer in order to load data blocks from file -// memset(pTsdbReadHandle->suppInfo.pstatis, 0, sizeof(SColumnDataAgg)); -// memset(pTsdbReadHandle->suppInfo.plist, 0, POINTER_BYTES); - -// tsdbInitDataBlockLoadInfo(&pTsdbReadHandle->dataBlockLoadInfo); -// tsdbInitCompBlockLoadInfo(&pTsdbReadHandle->compBlockLoadInfo); - -// SArray* pTable = NULL; -// // STsdbMeta* pMeta = tsdbGetMeta(pTsdbReadHandle->pTsdb); - -// // pTsdbReadHandle->pTableCheckInfo = destroyTableCheckInfo(pTsdbReadHandle->pTableCheckInfo); - -// pTsdbReadHandle->pTableCheckInfo = NULL; // createDataBlockScanInfo(pTsdbReadHandle, groupList, pMeta, -// // &pTable); -// if (pTsdbReadHandle->pTableCheckInfo == NULL) { -// // tsdbReaderClose(pTsdbReadHandle); -// terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; -// } - -// // pTsdbReadHandle->prev = doFreeColumnInfoData(pTsdbReadHandle->prev); -// // pTsdbReadHandle->next = doFreeColumnInfoData(pTsdbReadHandle->next); -// } - -// SArray* tsdbGetQueriedTableList(STsdbReader** pHandle) { -// assert(pHandle != NULL); - -// STsdbReader* pTsdbReadHandle = (STsdbReader*)pHandle; - -// size_t size = taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo); -// SArray* res = taosArrayInit(size, POINTER_BYTES); -// return res; -// } - -// static int32_t binarySearchForBlock(SBlock* pBlock, int32_t numOfBlocks, TSKEY skey, int32_t order) { -// int32_t firstSlot = 0; -// int32_t lastSlot = numOfBlocks - 1; - -// int32_t midSlot = firstSlot; - -// while (1) { -// numOfBlocks = lastSlot - firstSlot + 1; -// midSlot = (firstSlot + (numOfBlocks >> 1)); - -// if (numOfBlocks == 1) break; - -// if (skey > pBlock[midSlot].maxKey.ts) { -// if (numOfBlocks == 2) break; -// if ((order == TSDB_ORDER_DESC) && (skey < pBlock[midSlot + 1].minKey.ts)) break; -// firstSlot = midSlot + 1; -// } else if (skey < pBlock[midSlot].minKey.ts) { -// if ((order == TSDB_ORDER_ASC) && (skey > pBlock[midSlot - 1].maxKey.ts)) break; -// lastSlot = midSlot - 1; -// } else { -// break; // got the slot -// } -// } - -// return midSlot; -// } - static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, SArray* pIndexList) { SArray* aBlockIdx = taosArrayInit(8, sizeof(SBlockIdx)); int64_t st = taosGetTimestampUs(); - int32_t code = tsdbReadBlockIdx(pFileReader, aBlockIdx, NULL); + int32_t code = tsdbReadBlockIdx(pFileReader, aBlockIdx); if (code != TSDB_CODE_SUCCESS) { goto _end; } size_t num = taosArrayGetSize(aBlockIdx); if (num == 0) { - taosArrayClear(aBlockIdx); + taosArrayDestroy(aBlockIdx); return TSDB_CODE_SUCCESS; } @@ -594,24 +571,29 @@ _end: return code; } -static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_t* numOfValidTables, - int32_t* numOfBlocks) { - size_t numOfTables = taosArrayGetSize(pIndexList); - *numOfValidTables = 0; - - int64_t st = taosGetTimestampUs(); - size_t size = 0; - +static void cleanupTableScanInfo(SHashObj* pTableMap) { STableBlockScanInfo* px = NULL; while (1) { - px = taosHashIterate(pReader->status.pTableMap, px); + px = taosHashIterate(pTableMap, px); if (px == NULL) { break; } + // reset the index in last block when handing a new file + px->indexInBlockL = INITIAL_ROW_INDEX_VAL; tMapDataClear(&px->mapData); taosArrayClear(px->pBlockList); } +} + +static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SArray* pLastBlockIndex, + SBlockNumber * pBlockNum, SArray* pQualifiedLastBlock) { + int32_t numOfQTable = 0; + size_t sizeInDisk = 0; + size_t numOfTables = taosArrayGetSize(pIndexList); + + int64_t st = taosGetTimestampUs(); + cleanupTableScanInfo(pReader->status.pTableMap); for (int32_t i = 0; i < numOfTables; ++i) { SBlockIdx* pBlockIdx = taosArrayGet(pIndexList, i); @@ -619,9 +601,9 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_ STableBlockScanInfo* pScanInfo = taosHashGet(pReader->status.pTableMap, &pBlockIdx->uid, sizeof(int64_t)); tMapDataReset(&pScanInfo->mapData); - tsdbReadBlock(pReader->pFileReader, pBlockIdx, &pScanInfo->mapData, NULL); + tsdbReadBlock(pReader->pFileReader, pBlockIdx, &pScanInfo->mapData); - size += pScanInfo->mapData.nData; + sizeInDisk += pScanInfo->mapData.nData; for (int32_t j = 0; j < pScanInfo->mapData.nItem; ++j) { SBlock block = {0}; tMapDataGetItemByIdx(&pScanInfo->mapData, j, &block, tGetBlock); @@ -632,7 +614,7 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_ } // 2. version range check - if (block.minVersion > pReader->verRange.maxVer || block.maxVersion < pReader->verRange.minVer) { + if (block.minVer > pReader->verRange.maxVer || block.maxVer < pReader->verRange.minVer) { continue; } @@ -642,30 +624,56 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_ return TSDB_CODE_OUT_OF_MEMORY; } - (*numOfBlocks) += 1; + pBlockNum->numOfBlocks += 1; } if (pScanInfo->pBlockList != NULL && taosArrayGetSize(pScanInfo->pBlockList) > 0) { - (*numOfValidTables) += 1; + numOfQTable += 1; + } + } + + size_t numOfLast = taosArrayGetSize(pLastBlockIndex); + for(int32_t i = 0; i < numOfLast; ++i) { + SBlockL* pLastBlock = taosArrayGet(pLastBlockIndex, i); + if (pLastBlock->suid != pReader->suid) { + continue; + } + + { + // 1. time range check + if (pLastBlock->minKey > pReader->window.ekey || pLastBlock->maxKey < pReader->window.skey) { + continue; + } + + // 2. version range check + if (pLastBlock->minVer > pReader->verRange.maxVer || pLastBlock->maxVer < pReader->verRange.minVer) { + continue; + } + + pBlockNum->numOfLastBlocks += 1; + taosArrayPush(pQualifiedLastBlock, pLastBlock); } } + int32_t total = pBlockNum->numOfLastBlocks + pBlockNum->numOfBlocks; + double el = (taosGetTimestampUs() - st) / 1000.0; - tsdbDebug("load block of %d tables completed, blocks:%d in %d tables, size:%.2f Kb, elapsed time:%.2f ms %s", - numOfTables, *numOfBlocks, *numOfValidTables, size / 1000.0, el, pReader->idStr); + tsdbDebug( + "load block of %d tables completed, blocks:%d in %d tables, lastBlock:%d, block-info-size:%.2f Kb, elapsed " + "time:%.2f ms %s", + numOfTables, pBlockNum->numOfBlocks, numOfQTable, pBlockNum->numOfLastBlocks, sizeInDisk / 1000.0, el, + pReader->idStr); - pReader->cost.numOfBlocks += (*numOfBlocks); + pReader->cost.numOfBlocks += total; pReader->cost.headFileLoadTime += el; return TSDB_CODE_SUCCESS; } -// todo remove pblock parameter -static void setBlockAllDumped(SFileBlockDumpInfo* pDumpInfo, SBlock* pBlock, int32_t order) { +static void setBlockAllDumped(SFileBlockDumpInfo* pDumpInfo, int64_t maxKey, int32_t order) { int32_t step = ASCENDING_TRAVERSE(order) ? 1 : -1; - pDumpInfo->allDumped = true; - pDumpInfo->lastKey = pBlock->maxKey.ts + step; + pDumpInfo->lastKey = maxKey + step; } static void doCopyColVal(SColumnInfoData* pColInfoData, int32_t rowIndex, int32_t colIndex, SColVal* pColVal, @@ -685,8 +693,13 @@ static void doCopyColVal(SColumnInfoData* pColInfoData, int32_t rowIndex, int32_ } static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter) { - SFileDataBlockInfo* pFBlockInfo = taosArrayGet(pBlockIter->blockList, pBlockIter->index); - return pFBlockInfo; + if (taosArrayGetSize(pBlockIter->blockList) == 0) { + ASSERT(pBlockIter->numOfBlocks == taosArrayGetSize(pBlockIter->blockList)); + return NULL; + } + + SFileDataBlockInfo* pBlockInfo = taosArrayGet(pBlockIter->blockList, pBlockIter->index); + return pBlockInfo; } static SBlock* getCurrentBlock(SDataBlockIter* pBlockIter) { return &pBlockIter->block; } @@ -736,19 +749,20 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn pColData = taosArrayGet(pResBlock->pDataBlock, i); SColData* pData = tBlockDataGetColDataByIdx(pBlockData, colIndex); - - if (pData->cid == pColData->info.colId) { + if (pData->cid < pColData->info.colId) { + colIndex += 1; + } else if (pData->cid == pColData->info.colId) { for (int32_t j = pDumpInfo->rowIndex; j < endIndex && j >= 0; j += step) { tColDataGetValue(pData, j, &cv); doCopyColVal(pColData, rowIndex++, i, &cv, pSupInfo); } colIndex += 1; + i += 1; ASSERT(rowIndex == remain); } else { // the specified column does not exist in file block, fill with null data colDataAppendNNULL(pColData, 0, remain); + i += 1; } - - i += 1; } while (i < numOfOutputCols) { @@ -760,7 +774,7 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn pResBlock->info.rows = remain; pDumpInfo->rowIndex += step * remain; - setBlockAllDumped(pDumpInfo, pBlock, pReader->order); + setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->order); double elapsedTime = (taosGetTimestampUs() - st) / 1000.0; pReader->cost.blockLoadTime += elapsedTime; @@ -769,48 +783,39 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn tsdbDebug("%p copy file block to sdatablock, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64 ", rows:%d, remain:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s", pReader, pBlockIter->index, pFBlock->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, remain, unDumpedRows, - pBlock->minVersion, pBlock->maxVersion, elapsedTime, pReader->idStr); + pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr); return TSDB_CODE_SUCCESS; } -static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockIter, - STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) { +static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockIter, SBlockData* pBlockData) { int64_t st = taosGetTimestampUs(); - SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter); - SBlock* pBlock = getCurrentBlock(pBlockIter); - - SSDataBlock* pResBlock = pReader->pResBlock; - int32_t numOfCols = blockDataGetNumOfCols(pResBlock); - - SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo; + SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter); SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; + ASSERT(pBlockInfo != NULL); - SBlockIdx blockIdx = {.suid = pReader->suid, .uid = pBlockScanInfo->uid}; - int32_t code = - tsdbReadColData(pReader->pFileReader, &blockIdx, pBlock, pSupInfo->colIds, numOfCols, pBlockData, NULL, NULL); + SBlock* pBlock = getCurrentBlock(pBlockIter); + int32_t code = tsdbReadDataBlock(pReader->pFileReader, pBlock, pBlockData); if (code != TSDB_CODE_SUCCESS) { - goto _error; + tsdbError("%p error occurs in loading file block, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64 + ", rows:%d, code:%s %s", + pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow, + tstrerror(code), pReader->idStr); + return code; } double elapsedTime = (taosGetTimestampUs() - st) / 1000.0; - pReader->cost.blockLoadTime += elapsedTime; - pDumpInfo->allDumped = false; - tsdbDebug("%p load file block into buffer, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64 + tsdbDebug("%p load file block into buffer, global index:%d, index in table block list:%d, brange:%" PRId64 "-%" PRId64 ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s", - pReader, pBlockIter->index, pFBlock->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow, - pBlock->minVersion, pBlock->maxVersion, elapsedTime, pReader->idStr); + pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow, + pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr); - return TSDB_CODE_SUCCESS; + pReader->cost.blockLoadTime += elapsedTime; + pDumpInfo->allDumped = false; -_error: - tsdbError("%p error occurs in loading file block, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64 - ", rows:%d, %s", - pReader, pBlockIter->index, pFBlock->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow, - pReader->idStr); - return code; + return TSDB_CODE_SUCCESS; } static void cleanupBlockOrderSupporter(SBlockOrderSupporter* pSup) { @@ -864,11 +869,12 @@ static int32_t fileDataBlockOrderCompar(const void* pLeft, const void* pRight, v } static int32_t doSetCurrentBlock(SDataBlockIter* pBlockIter) { - SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter); - STableBlockScanInfo* pScanInfo = taosHashGet(pBlockIter->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid)); - - int32_t* mapDataIndex = taosArrayGet(pScanInfo->pBlockList, pFBlock->tbBlockIdx); - tMapDataGetItemByIdx(&pScanInfo->mapData, *mapDataIndex, &pBlockIter->block, tGetBlock); + SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter); + if (pBlockInfo != NULL) { + STableBlockScanInfo* pScanInfo = taosHashGet(pBlockIter->pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid)); + int32_t* mapDataIndex = taosArrayGet(pScanInfo->pBlockList, pBlockInfo->tbBlockIdx); + tMapDataGetItemByIdx(&pScanInfo->mapData, *mapDataIndex, &pBlockIter->block, tGetBlock); + } #if 0 qDebug("check file block, table uid:%"PRIu64" index:%d offset:%"PRId64", ", pScanInfo->uid, *mapDataIndex, pBlockIter->block.aSubBlock[0].offset); @@ -882,6 +888,7 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte pBlockIter->numOfBlocks = numOfBlocks; taosArrayClear(pBlockIter->blockList); + pBlockIter->pTableMap = pReader->status.pTableMap; // access data blocks according to the offset of each block in asc/desc order. int32_t numOfTables = (int32_t)taosHashGetSize(pReader->status.pTableMap); @@ -945,7 +952,7 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte int64_t et = taosGetTimestampUs(); tsdbDebug("%p create blocks info struct completed for one table, %d blocks not sorted, elapsed time:%.2f ms %s", - pReader, cnt, (et - st) / 1000.0, pReader->idStr); + pReader, numOfBlocks, (et - st) / 1000.0, pReader->idStr); pBlockIter->index = asc ? 0 : (numOfBlocks - 1); cleanupBlockOrderSupporter(&sup); @@ -956,7 +963,7 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte tsdbDebug("%p create data blocks info struct completed, %d blocks in %d tables %s", pReader, cnt, sup.numOfTables, pReader->idStr); - assert(cnt <= numOfBlocks && sup.numOfTables <= numOfTables); + ASSERT(cnt <= numOfBlocks && sup.numOfTables <= numOfTables); SMultiwayMergeTreeInfo* pTree = NULL; uint8_t ret = tMergeTreeCreate(&pTree, sup.numOfTables, &sup, fileDataBlockOrderCompar); @@ -983,7 +990,7 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte } int64_t et = taosGetTimestampUs(); - tsdbDebug("%p %d data blocks access order completed, elapsed time:%.2f ms %s", pReader, cnt, (et - st) / 1000.0, + tsdbDebug("%p %d data blocks access order completed, elapsed time:%.2f ms %s", pReader, numOfBlocks, (et - st) / 1000.0, pReader->idStr); cleanupBlockOrderSupporter(&sup); taosMemoryFree(pTree); @@ -1014,8 +1021,8 @@ static bool blockIteratorNext(SDataBlockIter* pBlockIter) { static int32_t dataBlockPartiallyRequired(STimeWindow* pWindow, SVersionRange* pVerRange, SBlock* pBlock) { return (pWindow->ekey < pBlock->maxKey.ts && pWindow->ekey >= pBlock->minKey.ts) || (pWindow->skey > pBlock->minKey.ts && pWindow->skey <= pBlock->maxKey.ts) || - (pVerRange->minVer > pBlock->minVersion && pVerRange->minVer <= pBlock->maxVersion) || - (pVerRange->maxVer < pBlock->maxVersion && pVerRange->maxVer >= pBlock->minVersion); + (pVerRange->minVer > pBlock->minVer && pVerRange->minVer <= pBlock->maxVer) || + (pVerRange->maxVer < pBlock->maxVer && pVerRange->maxVer >= pBlock->minVer); } static SBlock* getNeighborBlockOfSameTable(SFileDataBlockInfo* pFBlockInfo, STableBlockScanInfo* pTableBlockScanInfo, @@ -1095,8 +1102,8 @@ static bool bufferDataInFileBlockGap(int32_t order, TSDBKEY key, SBlock* pBlock) } static bool keyOverlapFileBlock(TSDBKEY key, SBlock* pBlock, SVersionRange* pVerRange) { - return (key.ts >= pBlock->minKey.ts && key.ts <= pBlock->maxKey.ts) && (pBlock->maxVersion >= pVerRange->minVer) && - (pBlock->minVersion <= pVerRange->maxVer); + return (key.ts >= pBlock->minKey.ts && key.ts <= pBlock->maxKey.ts) && (pBlock->maxVer >= pVerRange->minVer) && + (pBlock->minVer <= pVerRange->maxVer); } static bool doCheckforDatablockOverlap(STableBlockScanInfo* pBlockScanInfo, const SBlock* pBlock) { @@ -1105,11 +1112,11 @@ static bool doCheckforDatablockOverlap(STableBlockScanInfo* pBlockScanInfo, cons for (int32_t i = pBlockScanInfo->fileDelIndex; i < num; i += 1) { TSDBKEY* p = taosArrayGet(pBlockScanInfo->delSkyline, i); if (p->ts >= pBlock->minKey.ts && p->ts <= pBlock->maxKey.ts) { - if (p->version >= pBlock->minVersion) { + if (p->version >= pBlock->minVer) { return true; } } else if (p->ts < pBlock->minKey.ts) { // p->ts < pBlock->minKey.ts - if (p->version >= pBlock->minVersion) { + if (p->version >= pBlock->minVer) { if (i < num - 1) { TSDBKEY* pnext = taosArrayGet(pBlockScanInfo->delSkyline, i + 1); if (i + 1 == num - 1) { // pnext is the last point @@ -1117,7 +1124,7 @@ static bool doCheckforDatablockOverlap(STableBlockScanInfo* pBlockScanInfo, cons return true; } } else { - if (pnext->ts >= pBlock->minKey.ts && pnext->version >= pBlock->minVersion) { + if (pnext->ts >= pBlock->minKey.ts && pnext->version >= pBlock->minVer) { return true; } } @@ -1169,7 +1176,7 @@ static bool overlapWithDelSkyline(STableBlockScanInfo* pBlockScanInfo, const SBl // 4. output buffer should be large enough to hold all rows in current block // 5. delete info should not overlap with current block data static bool fileBlockShouldLoad(STsdbReader* pReader, SFileDataBlockInfo* pFBlock, SBlock* pBlock, - STableBlockScanInfo* pScanInfo, TSDBKEY key) { + STableBlockScanInfo* pScanInfo, TSDBKEY key, SLastBlockReader* pLastBlockReader) { int32_t neighborIndex = 0; SBlock* pNeighbor = getNeighborBlockOfSameTable(pFBlock, pScanInfo, &neighborIndex, pReader->order); @@ -1184,8 +1191,30 @@ static bool fileBlockShouldLoad(STsdbReader* pReader, SFileDataBlockInfo* pFBloc bool hasDup = (pBlock->nSubBlock == 1) ? pBlock->hasDup : true; bool overlapWithDel = overlapWithDelSkyline(pScanInfo, pBlock, pReader->order); - return (overlapWithNeighbor || hasDup || dataBlockPartiallyRequired(&pReader->window, &pReader->verRange, pBlock) || - keyOverlapFileBlock(key, pBlock, &pReader->verRange) || (pBlock->nRow > pReader->capacity) || overlapWithDel); + // todo here we need to each key in the last files to identify if it is really overlapped with last block + bool overlapWithlastBlock = false; + if (taosArrayGetSize(pLastBlockReader->pBlockL) > 0 && (pLastBlockReader->currentBlockIndex != -1)) { + SBlockL *pBlockL = taosArrayGet(pLastBlockReader->pBlockL, pLastBlockReader->currentBlockIndex); + overlapWithlastBlock = !(pBlock->maxKey.ts < pBlockL->minKey || pBlock->minKey.ts > pBlockL->maxKey); + } + + bool moreThanOutputCapacity = pBlock->nRow > pReader->capacity; + bool partiallyRequired = dataBlockPartiallyRequired(&pReader->window, &pReader->verRange, pBlock); + bool overlapWithKey = keyOverlapFileBlock(key, pBlock, &pReader->verRange); + + bool loadDataBlock = (overlapWithNeighbor || hasDup || partiallyRequired || overlapWithKey || + moreThanOutputCapacity || overlapWithDel || overlapWithlastBlock); + + // log the reason why load the datablock for profile + if (loadDataBlock) { + tsdbDebug("%p uid:%" PRIu64 + " need to load the datablock, overlapwithneighborblock:%d, hasDup:%d, partiallyRequired:%d, " + "overlapWithKey:%d, greaterThanBuf:%d, overlapWithDel:%d, overlapWithlastBlock:%d, %s", + pReader, pFBlock->uid, overlapWithNeighbor, hasDup, partiallyRequired, overlapWithKey, + moreThanOutputCapacity, overlapWithDel, overlapWithlastBlock, pReader->idStr); + } + + return loadDataBlock; } static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, int64_t endKey) { @@ -1224,7 +1253,7 @@ static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pB int64_t nextKey = pBlockData->aTSKEY[pDumpInfo->rowIndex + step]; if (nextKey != key) { // merge is not needed - doAppendRowFromBlock(pReader->pResBlock, pReader, pBlockData, pDumpInfo->rowIndex); + doAppendRowFromFileBlock(pReader->pResBlock, pReader, pBlockData, pDumpInfo->rowIndex); pDumpInfo->rowIndex += step; return true; } @@ -1259,88 +1288,354 @@ static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader* } static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow, - SIterInfo* pIter, int64_t key) { + SIterInfo* pIter, int64_t key, SLastBlockReader* pLastBlockReader) { SRowMerger merge = {0}; STSRow* pTSRow = NULL; SBlockData* pBlockData = &pReader->status.fileBlockData; SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; + int64_t tsLast = INT64_MIN; + if ((pLastBlockReader->lastBlockData.nRow > 0) && hasDataInLastBlock(pLastBlockReader)) { + tsLast = getCurrentKeyInLastBlock(pLastBlockReader); + } + TSDBKEY k = TSDBROW_KEY(pRow); TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex); - SArray* pDelList = pBlockScanInfo->delSkyline; - bool freeTSRow = false; - uint64_t uid = pBlockScanInfo->uid; - // ascending order traverse - if (ASCENDING_TRAVERSE(pReader->order)) { - if (key < k.ts) { - // imem & mem are all empty, only file exist - if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) { - return TSDB_CODE_SUCCESS; - } else { - tRowMergerInit(&merge, &fRow, pReader->pSchema); - doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge); - tRowMergerGetRow(&merge, &pTSRow); - freeTSRow = true; - } - } else if (k.ts < key) { // k.ts < key - doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow); - } else { // k.ts == key, ascending order: file block ----> imem rows -----> mem rows + SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData; + + int64_t minKey = 0; + if (pReader->order == TSDB_ORDER_ASC) { + minKey = INT64_MAX; // chosen the minimum value + if (minKey > tsLast && hasDataInLastBlock(pLastBlockReader)) { + minKey = tsLast; + } + + if (minKey > k.ts) { + minKey = k.ts; + } + + if (minKey > key && pBlockData->nRow > 0) { + minKey = key; + } + } else { + minKey = INT64_MIN; + if (minKey < tsLast && hasDataInLastBlock(pLastBlockReader)) { + minKey = tsLast; + } + + if (minKey < k.ts) { + minKey = k.ts; + } + + if (minKey < key && pBlockData->nRow > 0) { + minKey = key; + } + } + + bool init = false; + + // ASC: file block ---> last block -----> imem -----> mem + //DESC: mem -----> imem -----> last block -----> file block + if (pReader->order == TSDB_ORDER_ASC) { + if (minKey == key) { + init = true; tRowMergerInit(&merge, &fRow, pReader->pSchema); doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge); + } + + if (minKey == tsLast) { + TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex); + if (init) { + tRowMerge(&merge, &fRow1); + } else { + init = true; + tRowMergerInit(&merge, &fRow1, pReader->pSchema); + } + doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge); + } - tRowMerge(&merge, pRow); + if (minKey == k.ts) { + if (init) { + tRowMerge(&merge, pRow); + } else { + init = true; + STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid); + tRowMergerInit(&merge, pRow, pSchema); + } + doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); + } + } else { + if (minKey == k.ts) { + init = true; + STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid); + tRowMergerInit(&merge, pRow, pSchema); doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); + } - tRowMergerGetRow(&merge, &pTSRow); - freeTSRow = true; + if (minKey == tsLast) { + TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex); + if (init) { + tRowMerge(&merge, &fRow1); + } else { + init = true; + tRowMergerInit(&merge, &fRow1, pReader->pSchema); + } + doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge); } - } else { // descending order scan - if (key < k.ts) { - doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow); - } else if (k.ts < key) { - if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) { - return TSDB_CODE_SUCCESS; + + if (minKey == key) { + if (init) { + tRowMerge(&merge, &fRow); } else { + init = true; + tRowMergerInit(&merge, &fRow, pReader->pSchema); + } + doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge); + } + } + + tRowMergerGetRow(&merge, &pTSRow); + doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid); + + taosMemoryFree(pTSRow); + tRowMergerClear(&merge); + return TSDB_CODE_SUCCESS; +} + +static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader, STsdbReader* pReader, + STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData, + bool mergeBlockData) { + SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData; + int64_t tsLastBlock = getCurrentKeyInLastBlock(pLastBlockReader); + + STSRow* pTSRow = NULL; + SRowMerger merge = {0}; + + TSDBROW fRow = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex); + + tRowMergerInit(&merge, &fRow, pReader->pSchema); + doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge); + + // merge with block data if ts == key + if (mergeBlockData) { + doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge); + } + + tRowMergerGetRow(&merge, &pTSRow); + doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid); + + taosMemoryFree(pTSRow); + tRowMergerClear(&merge); + return TSDB_CODE_SUCCESS; +} + +static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader* pLastBlockReader, int64_t key, + STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) { + SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; + + if (pBlockData->nRow > 0) { + // no last block available, only data block exists + if (pLastBlockReader->lastBlockData.nRow == 0 || (!hasDataInLastBlock(pLastBlockReader))) { + return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader); + } + + // row in last file block + TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex); + int64_t ts = getCurrentKeyInLastBlock(pLastBlockReader); + ASSERT(ts >= key); + + if (ASCENDING_TRAVERSE(pReader->order)) { + if (key < ts) { // imem, mem are all empty, file blocks (data blocks and last block) exist + return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader); + } else if (key == ts) { + STSRow* pTSRow = NULL; + SRowMerger merge = {0}; + tRowMergerInit(&merge, &fRow, pReader->pSchema); doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge); + doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge); + tRowMergerGetRow(&merge, &pTSRow); - freeTSRow = true; + doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid); + + taosMemoryFree(pTSRow); + tRowMergerClear(&merge); + return TSDB_CODE_SUCCESS; + } else { + ASSERT(0); + return TSDB_CODE_SUCCESS; } - } else { // descending order: mem rows -----> imem rows ------> file block - STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid); + } else { // desc order + return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, pBlockData, true); + } + } else { // only last block exists + return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false); + } +} - tRowMergerInit(&merge, pRow, pSchema); - doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); +static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData, + SLastBlockReader* pLastBlockReader) { + SRowMerger merge = {0}; + STSRow* pTSRow = NULL; - tRowMerge(&merge, &fRow); - doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge); + SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; + SArray* pDelList = pBlockScanInfo->delSkyline; - tRowMergerGetRow(&merge, &pTSRow); - freeTSRow = true; + TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pDelList, pReader); + TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pDelList, pReader); + ASSERT(pRow != NULL && piRow != NULL); + + SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData; + int64_t tsLast = INT64_MIN; + if (hasDataInLastBlock(pLastBlockReader)) { + tsLast = getCurrentKeyInLastBlock(pLastBlockReader); + } + + int64_t key = pBlockData->aTSKEY[pDumpInfo->rowIndex]; + + TSDBKEY k = TSDBROW_KEY(pRow); + TSDBKEY ik = TSDBROW_KEY(piRow); + + int64_t minKey = 0; + if (ASCENDING_TRAVERSE(pReader->order)) { + minKey = INT64_MAX; // let's find the minimum + if (minKey > k.ts) { + minKey = k.ts; + } + + if (minKey > ik.ts) { + minKey = ik.ts; + } + + if (minKey > key && pBlockData->nRow > 0) { + minKey = key; + } + + if (minKey > tsLast && hasDataInLastBlock(pLastBlockReader)) { + minKey = tsLast; + } + } else { + minKey = INT64_MIN; // let find the maximum ts value + if (minKey < k.ts) { + minKey = k.ts; + } + + if (minKey < ik.ts) { + minKey = ik.ts; + } + + if (minKey < key && pBlockData->nRow > 0) { + minKey = key; + } + + if (minKey < tsLast && hasDataInLastBlock(pLastBlockReader)) { + minKey = tsLast; } } - tRowMergerClear(&merge); - doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid); + bool init = false; - if (freeTSRow) { - taosMemoryFree(pTSRow); + // ASC: file block -----> last block -----> imem -----> mem + // DESC: mem -----> imem -----> last block -----> file block + if (ASCENDING_TRAVERSE(pReader->order)) { + if (minKey == key) { + init = true; + TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex); + tRowMergerInit(&merge, &fRow, pReader->pSchema); + doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge); + } + + if (minKey == tsLast) { + TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex); + if (init) { + tRowMerge(&merge, &fRow1); + } else { + init = true; + tRowMergerInit(&merge, &fRow1, pReader->pSchema); + } + doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge); + } + + if (minKey == ik.ts) { + if (init) { + tRowMerge(&merge, piRow); + } else { + init = true; + STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid); + tRowMergerInit(&merge, piRow, pSchema); + } + doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, &merge, pReader); + } + + if (minKey == k.ts) { + if (init) { + tRowMerge(&merge, pRow); + } else { + STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid); + tRowMergerInit(&merge, pRow, pSchema); + } + doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); + } + } else { + if (minKey == k.ts) { + init = true; + STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid); + tRowMergerInit(&merge, pRow, pSchema); + doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); + } + + if (minKey == ik.ts) { + if (init) { + tRowMerge(&merge, piRow); + } else { + init = true; + STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid); + tRowMergerInit(&merge, piRow, pSchema); + } + doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, &merge, pReader); + } + + if (minKey == tsLast) { + TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex); + if (init) { + tRowMerge(&merge, &fRow1); + } else { + init = true; + tRowMergerInit(&merge, &fRow1, pReader->pSchema); + } + doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge); + } + + if (minKey == key) { + TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex); + if (!init) { + tRowMergerInit(&merge, &fRow, pReader->pSchema); + } else { + tRowMerge(&merge, &fRow); + } + doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge); + } } + tRowMergerGetRow(&merge, &pTSRow); + doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid); + + taosMemoryFree(pTSRow); + tRowMergerClear(&merge); return TSDB_CODE_SUCCESS; } -static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo) { +#if 0 +static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) { SRowMerger merge = {0}; STSRow* pTSRow = NULL; SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; - SBlockData* pBlockData = &pReader->status.fileBlockData; SArray* pDelList = pBlockScanInfo->delSkyline; - TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pDelList, pReader); - TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pDelList, pReader); + TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pDelList, pReader); + TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pDelList, pReader); ASSERT(pRow != NULL && piRow != NULL); int64_t key = pBlockData->aTSKEY[pDumpInfo->rowIndex]; @@ -1377,7 +1672,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* // [3] ik.ts < key <= k.ts // [4] ik.ts < k.ts <= key if (ik.ts < k.ts) { - doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow); + doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow); doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid); if (freeTSRow) { taosMemoryFree(pTSRow); @@ -1388,7 +1683,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* // [5] k.ts < key <= ik.ts // [6] k.ts < ik.ts <= key if (k.ts < ik.ts) { - doMergeMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, &pTSRow, pReader, &freeTSRow); + doMergeMemTableMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, &pTSRow, pReader, &freeTSRow); doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid); if (freeTSRow) { taosMemoryFree(pTSRow); @@ -1434,7 +1729,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* // [3] ik.ts > k.ts >= Key // [4] ik.ts > key >= k.ts if (ik.ts > key) { - doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow); + doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow); doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid); if (freeTSRow) { taosMemoryFree(pTSRow); @@ -1457,7 +1752,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* //[7] key = ik.ts > k.ts if (key == ik.ts) { - doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow); + doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow); TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex); tRowMerge(&merge, &fRow); @@ -1474,107 +1769,233 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* ASSERT(0); return -1; } +#endif static bool isValidFileBlockRow(SBlockData* pBlockData, SFileBlockDumpInfo* pDumpInfo, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader) { + // it is an multi-table data block + if (pBlockData->aUid != NULL) { + uint64_t uid = pBlockData->aUid[pDumpInfo->rowIndex]; + if (uid != pBlockScanInfo->uid) { // move to next row + return false; + } + } + // check for version and time range int64_t ver = pBlockData->aVersion[pDumpInfo->rowIndex]; if (ver > pReader->verRange.maxVer || ver < pReader->verRange.minVer) { return false; } - int64_t ts = pBlockData->aTSKEY[pDumpInfo->rowIndex]; - if (ts > pReader->window.ekey || ts < pReader->window.skey) { - return false; + int64_t ts = pBlockData->aTSKEY[pDumpInfo->rowIndex]; + if (ts > pReader->window.ekey || ts < pReader->window.skey) { + return false; + } + + TSDBKEY k = {.ts = ts, .version = ver}; + if (hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->fileDelIndex, &k, pReader->order)) { + return false; + } + + return true; +} + +static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); } + +static void initLastBlockReader(SLastBlockReader* pLastBlockReader, uint64_t uid, int16_t* startPos) { + pLastBlockReader->uid = uid; + pLastBlockReader->rowIndex = startPos; + + if (*startPos == -1) { + if (ASCENDING_TRAVERSE(pLastBlockReader->order)) { + // do nothing + } else { + *startPos = pLastBlockReader->lastBlockData.nRow; + } + } +} + +static void setAllRowsChecked(SLastBlockReader *pLastBlockReader) { + *pLastBlockReader->rowIndex = ALL_ROWS_CHECKED_INDEX; +} + +static bool nextRowInLastBlock(SLastBlockReader *pLastBlockReader, STableBlockScanInfo* pBlockScanInfo) { + bool asc = ASCENDING_TRAVERSE(pLastBlockReader->order); + int32_t step = (asc) ? 1 : -1; + if (*pLastBlockReader->rowIndex == ALL_ROWS_CHECKED_INDEX) { + return false; + } + + *(pLastBlockReader->rowIndex) += step; + + SBlockData* pBlockData = &pLastBlockReader->lastBlockData; + for(int32_t i = *(pLastBlockReader->rowIndex); i < pBlockData->nRow && i >= 0; i += step) { + if (pBlockData->aUid != NULL) { + if (asc) { + if (pBlockData->aUid[i] < pLastBlockReader->uid) { + continue; + } else if (pBlockData->aUid[i] > pLastBlockReader->uid) { + break; + } + } else { + if (pBlockData->aUid[i] > pLastBlockReader->uid) { + continue; + } else if (pBlockData->aUid[i] < pLastBlockReader->uid) { + break; + } + } + } + + int64_t ts = pBlockData->aTSKEY[i]; + if (ts < pLastBlockReader->window.skey) { + continue; + } + + int64_t ver = pBlockData->aVersion[i]; + if (ver < pLastBlockReader->verRange.minVer) { + continue; + } + + // no data any more, todo opt handle desc case + if (ts > pLastBlockReader->window.ekey) { + continue; + } + + // todo opt handle desc case + if (ver > pLastBlockReader->verRange.maxVer) { + continue; + } + + TSDBKEY k = {.ts = ts, .version = ver}; + if (hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->lastBlockDelIndex, &k, pLastBlockReader->order)) { + continue; + } + + *(pLastBlockReader->rowIndex) = i; + return true; } - TSDBKEY k = {.ts = ts, .version = ver}; - if (hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->fileDelIndex, &k, pReader->order)) { + // set all data is consumed in last block + setAllRowsChecked(pLastBlockReader); + return false; +} + +static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader) { + SBlockData* pBlockData = &pLastBlockReader->lastBlockData; + return pBlockData->aTSKEY[*pLastBlockReader->rowIndex]; +} + +static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader) { + if (*pLastBlockReader->rowIndex == ALL_ROWS_CHECKED_INDEX) { return false; } + ASSERT(pLastBlockReader->lastBlockData.nRow > 0); return true; } -static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); } +int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key, STsdbReader* pReader) { + SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; + + TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex); -static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo) { + if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) { + return TSDB_CODE_SUCCESS; + } else { + STSRow* pTSRow = NULL; + SRowMerger merge = {0}; + + tRowMergerInit(&merge, &fRow, pReader->pSchema); + doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge); + tRowMergerGetRow(&merge, &pTSRow); + doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid); + + taosMemoryFree(pTSRow); + tRowMergerClear(&merge); + return TSDB_CODE_SUCCESS; + } + + return TSDB_CODE_SUCCESS; +} + +static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, + SBlockData* pBlockData, SLastBlockReader* pLastBlockReader) { SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; - SBlockData* pBlockData = &pReader->status.fileBlockData; - int64_t key = pBlockData->aTSKEY[pDumpInfo->rowIndex]; - TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader); - TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader); + int64_t key = (pBlockData->nRow > 0)? pBlockData->aTSKEY[pDumpInfo->rowIndex]:INT64_MIN; + TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader); + TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader); if (pBlockScanInfo->iter.hasVal && pBlockScanInfo->iiter.hasVal) { - return doMergeThreeLevelRows(pReader, pBlockScanInfo); + return doMergeMultiLevelRows(pReader, pBlockScanInfo, pBlockData, pLastBlockReader); } else { - // imem + file + // imem + file + last block if (pBlockScanInfo->iiter.hasVal) { - return doMergeBufAndFileRows(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, key); + return doMergeBufAndFileRows(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, key, pLastBlockReader); } - // mem + file + // mem + file + last block if (pBlockScanInfo->iter.hasVal) { - return doMergeBufAndFileRows(pReader, pBlockScanInfo, pRow, &pBlockScanInfo->iter, key); + return doMergeBufAndFileRows(pReader, pBlockScanInfo, pRow, &pBlockScanInfo->iter, key, pLastBlockReader); } - // imem & mem are all empty, only file exist - if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) { - return TSDB_CODE_SUCCESS; - } else { - TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex); - - STSRow* pTSRow = NULL; - SRowMerger merge = {0}; - - tRowMergerInit(&merge, &fRow, pReader->pSchema); - doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge); - tRowMergerGetRow(&merge, &pTSRow); - doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid); - - taosMemoryFree(pTSRow); - tRowMergerClear(&merge); - return TSDB_CODE_SUCCESS; - } + // files data blocks + last block + return mergeFileBlockAndLastBlock(pReader, pLastBlockReader, key, pBlockScanInfo, pBlockData); } } -static int32_t buildComposedDataBlock(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo) { +static int32_t buildComposedDataBlock(STsdbReader* pReader) { SSDataBlock* pResBlock = pReader->pResBlock; + SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter); + + STableBlockScanInfo* pBlockScanInfo = NULL; + if (pBlockInfo != NULL) { + pBlockScanInfo = taosHashGet(pReader->status.pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid)); + } else { + pBlockScanInfo = pReader->status.pTableIter; + } + + SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader; SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; SBlockData* pBlockData = &pReader->status.fileBlockData; int32_t step = ASCENDING_TRAVERSE(pReader->order) ? 1 : -1; - int32_t numOfSub = 1; - int64_t st = taosGetTimestampUs(); while (1) { // todo check the validate of row in file block + bool hasBlockData = false; { - if (!isValidFileBlockRow(pBlockData, pDumpInfo, pBlockScanInfo, pReader)) { + while (pBlockData->nRow > 0) { // find the first qualified row in data block + if (isValidFileBlockRow(pBlockData, pDumpInfo, pBlockScanInfo, pReader)) { + hasBlockData = true; + break; + } + pDumpInfo->rowIndex += step; SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter); - numOfSub = pBlock->nSubBlock; - if (pDumpInfo->rowIndex >= pBlock->nRow || pDumpInfo->rowIndex < 0) { - setBlockAllDumped(pDumpInfo, pBlock, pReader->order); + setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->order); break; } - - continue; } } - buildComposedDataBlockImpl(pReader, pBlockScanInfo); - SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter); + bool hasBlockLData = hasDataInLastBlock(pLastBlockReader); + + // no data in last block and block, no need to proceed. + if ((hasBlockData == false) && (hasBlockLData == false)) { + break; + } + + buildComposedDataBlockImpl(pReader, pBlockScanInfo, pBlockData, pLastBlockReader); // currently loaded file data block is consumed - if (pDumpInfo->rowIndex >= pBlock->nRow || pDumpInfo->rowIndex < 0) { - setBlockAllDumped(pDumpInfo, pBlock, pReader->order); + if ((pBlockData->nRow > 0) && (pDumpInfo->rowIndex >= pBlockData->nRow || pDumpInfo->rowIndex < 0)) { + SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter); + setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->order); break; } @@ -1589,10 +2010,12 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader, STableBlockScanInfo* setComposedBlockFlag(pReader, true); int64_t et = taosGetTimestampUs(); - tsdbDebug("%p uid:%" PRIu64 ", composed data block created, subBlock:%d, brange:%" PRIu64 "-%" PRIu64 - " rows:%d, elapsed time:%.2f ms %s", - pReader, pBlockScanInfo->uid, numOfSub, pResBlock->info.window.skey, pResBlock->info.window.ekey, - pResBlock->info.rows, (et - st) / 1000.0, pReader->idStr); + if (pResBlock->info.rows > 0) { + tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 + " rows:%d, elapsed time:%.2f ms %s", + pReader, pBlockScanInfo->uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, + pResBlock->info.rows, (et - st) / 1000.0, pReader->idStr); + } return TSDB_CODE_SUCCESS; } @@ -1617,7 +2040,7 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea STbData* d = NULL; if (pReader->pReadSnap->pMem != NULL) { - tsdbGetTbDataFromMemTable(pReader->pReadSnap->pMem, pReader->suid, pBlockScanInfo->uid, &d); + d = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pMem, pReader->suid, pBlockScanInfo->uid); if (d != NULL) { code = tsdbTbDataIterCreate(d, &startKey, backward, &pBlockScanInfo->iter.iter); if (code == TSDB_CODE_SUCCESS) { @@ -1638,7 +2061,7 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea STbData* di = NULL; if (pReader->pReadSnap->pIMem != NULL) { - tsdbGetTbDataFromMemTable(pReader->pReadSnap->pIMem, pReader->suid, pBlockScanInfo->uid, &di); + di = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pIMem, pReader->suid, pBlockScanInfo->uid); if (di != NULL) { code = tsdbTbDataIterCreate(di, &startKey, backward, &pBlockScanInfo->iiter.iter); if (code == TSDB_CODE_SUCCESS) { @@ -1677,7 +2100,7 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* SDelFile* pDelFile = pReader->pReadSnap->fs.pDelFile; if (pDelFile) { SDelFReader* pDelFReader = NULL; - code = tsdbDelFReaderOpen(&pDelFReader, pDelFile, pTsdb, NULL); + code = tsdbDelFReaderOpen(&pDelFReader, pDelFile, pTsdb); if (code != TSDB_CODE_SUCCESS) { goto _err; } @@ -1688,7 +2111,7 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* goto _err; } - code = tsdbReadDelIdx(pDelFReader, aDelIdx, NULL); + code = tsdbReadDelIdx(pDelFReader, aDelIdx); if (code != TSDB_CODE_SUCCESS) { taosArrayDestroy(aDelIdx); tsdbDelFReaderClose(&pDelFReader); @@ -1699,7 +2122,7 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* SDelIdx* pIdx = taosArraySearch(aDelIdx, &idx, tCmprDelIdx, TD_EQ); if (pIdx != NULL) { - code = tsdbReadDelData(pDelFReader, pIdx, pDelData, NULL); + code = tsdbReadDelData(pDelFReader, pIdx, pDelData); } taosArrayDestroy(aDelIdx); @@ -1737,6 +2160,7 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* ASCENDING_TRAVERSE(pReader->order) ? 0 : taosArrayGetSize(pBlockScanInfo->delSkyline) - 1; pBlockScanInfo->iiter.index = pBlockScanInfo->iter.index; pBlockScanInfo->fileDelIndex = pBlockScanInfo->iter.index; + pBlockScanInfo->lastBlockDelIndex = pBlockScanInfo->iter.index; return code; _err: @@ -1744,19 +2168,16 @@ _err: return code; } -static TSDBKEY getCurrentKeyInBuf(SDataBlockIter* pBlockIter, STsdbReader* pReader) { +static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader) { TSDBKEY key = {.ts = TSKEY_INITIAL_VAL}; - SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter); - STableBlockScanInfo* pScanInfo = taosHashGet(pReader->status.pTableMap, &pFBlock->uid, sizeof(pFBlock->uid)); - initMemDataIterator(pScanInfo, pReader); - TSDBROW* pRow = getValidRow(&pScanInfo->iter, pScanInfo->delSkyline, pReader); + TSDBROW* pRow = getValidMemRow(&pScanInfo->iter, pScanInfo->delSkyline, pReader); if (pRow != NULL) { key = TSDBROW_KEY(pRow); } - pRow = getValidRow(&pScanInfo->iiter, pScanInfo->delSkyline, pReader); + pRow = getValidMemRow(&pScanInfo->iiter, pScanInfo->delSkyline, pReader); if (pRow != NULL) { TSDBKEY k = TSDBROW_KEY(pRow); if (key.ts > k.ts) { @@ -1767,11 +2188,15 @@ static TSDBKEY getCurrentKeyInBuf(SDataBlockIter* pBlockIter, STsdbReader* pRead return key; } -static int32_t moveToNextFile(STsdbReader* pReader, int32_t* numOfBlocks) { +static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) { SReaderStatus* pStatus = &pReader->status; + pBlockNum->numOfBlocks = 0; + pBlockNum->numOfLastBlocks = 0; size_t numOfTables = taosHashGetSize(pReader->status.pTableMap); SArray* pIndexList = taosArrayInit(numOfTables, sizeof(SBlockIdx)); + SArray* pLastBlocks = pStatus->fileIter.pLastBlockReader->pBlockL; + taosArrayClear(pLastBlocks); while (1) { bool hasNext = filesetIteratorNext(&pStatus->fileIter, pReader); @@ -1786,18 +2211,34 @@ static int32_t moveToNextFile(STsdbReader* pReader, int32_t* numOfBlocks) { return code; } - if (taosArrayGetSize(pIndexList) > 0) { - uint32_t numOfValidTable = 0; - code = doLoadFileBlock(pReader, pIndexList, &numOfValidTable, numOfBlocks); + code = tsdbReadBlockL(pReader->pFileReader, pLastBlocks); + if (code != TSDB_CODE_SUCCESS) { + taosArrayDestroy(pIndexList); + return code; + } + + if (taosArrayGetSize(pIndexList) > 0 || taosArrayGetSize(pLastBlocks) > 0) { + SArray* pQLastBlock = taosArrayInit(4, sizeof(SBlockL)); + + code = doLoadFileBlock(pReader, pIndexList, pLastBlocks, pBlockNum, pQLastBlock); if (code != TSDB_CODE_SUCCESS) { taosArrayDestroy(pIndexList); + taosArrayDestroy(pQLastBlock); return code; } - if (numOfValidTable > 0) { + if (pBlockNum->numOfBlocks + pBlockNum->numOfLastBlocks > 0) { + ASSERT(taosArrayGetSize(pQLastBlock) == pBlockNum->numOfLastBlocks); + taosArrayClear(pLastBlocks); + taosArrayAddAll(pLastBlocks, pQLastBlock); + + taosArrayDestroy(pQLastBlock); break; } + + taosArrayDestroy(pQLastBlock); } + // no blocks in current file, try next files } @@ -1805,40 +2246,275 @@ static int32_t moveToNextFile(STsdbReader* pReader, int32_t* numOfBlocks) { return TSDB_CODE_SUCCESS; } +static int32_t doLoadRelatedLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo *pBlockScanInfo, STsdbReader* pReader) { + SArray* pBlocks = pLastBlockReader->pBlockL; + SBlockL* pBlock = NULL; + + uint64_t uid = pBlockScanInfo->uid; + int32_t totalLastBlocks = (int32_t)taosArrayGetSize(pBlocks); + + initMemDataIterator(pBlockScanInfo, pReader); + + // find the correct SBlockL. todo binary search + int32_t index = -1; + for (int32_t i = 0; i < totalLastBlocks; ++i) { + SBlockL* p = taosArrayGet(pBlocks, i); + if (p->minUid <= uid && p->maxUid >= uid) { + index = i; + pBlock = p; + break; + } + } + + if (index == -1) { + pLastBlockReader->currentBlockIndex = index; + tBlockDataReset(&pLastBlockReader->lastBlockData); + return TSDB_CODE_SUCCESS; + } + + // the required last datablock has already loaded + if (index == pLastBlockReader->currentBlockIndex) { + return TSDB_CODE_SUCCESS; + } + + int64_t st = taosGetTimestampUs(); + int32_t code = tBlockDataInit(&pLastBlockReader->lastBlockData, pReader->suid, pReader->suid ? 0 : uid, pReader->pSchema); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("%p init block data failed, code:%s %s", pReader, tstrerror(code), pReader->idStr); + return code; + } + + code = tsdbReadLastBlock(pReader->pFileReader, pBlock, &pLastBlockReader->lastBlockData); + + double el = (taosGetTimestampUs() - st) / 1000.0; + if (code != TSDB_CODE_SUCCESS) { + tsdbError("%p error occurs in loading last block into buffer, last block index:%d, total:%d code:%s %s", pReader, + pLastBlockReader->currentBlockIndex, totalLastBlocks, tstrerror(code), pReader->idStr); + } else { + tsdbDebug("%p load last block completed, uid:%" PRIu64 + " last block index:%d, total:%d rows:%d, minVer:%d, maxVer:%d, brange:%" PRId64 "-%" PRId64 + " elapsed time:%.2f ms, %s", + pReader, uid, index, totalLastBlocks, pBlock->nRow, pBlock->minVer, pBlock->maxVer, pBlock->minKey, + pBlock->maxKey, el, pReader->idStr); + } + + pLastBlockReader->currentBlockIndex = index; + pReader->cost.lastBlockLoad += 1; + pReader->cost.lastBlockLoadTime += el; + + return TSDB_CODE_SUCCESS; +} + +static int32_t uidComparFunc(const void* p1, const void* p2) { + uint64_t pu1 = *(uint64_t*) p1; + uint64_t pu2 = *(uint64_t*) p2; + if (pu1 == pu2) { + return 0; + } else { + return (pu1 < pu2)? -1:1; + } +} + +static void extractOrderedTableUidList(SUidOrderCheckInfo *pOrderCheckInfo, SReaderStatus* pStatus) { + int32_t index = 0; + int32_t total = taosHashGetSize(pStatus->pTableMap); + + void* p = taosHashIterate(pStatus->pTableMap, NULL); + while(p != NULL) { + STableBlockScanInfo* pScanInfo = p; + pOrderCheckInfo->tableUidList[index++] = pScanInfo->uid; + p = taosHashIterate(pStatus->pTableMap, p); + } + + taosSort(pOrderCheckInfo->tableUidList, total, sizeof(uint64_t), uidComparFunc); +} + +static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus) { + if (pOrderCheckInfo->tableUidList == NULL) { + int32_t total = taosHashGetSize(pStatus->pTableMap); + + pOrderCheckInfo->currentIndex = 0; + pOrderCheckInfo->tableUidList = taosMemoryMalloc(total * sizeof(uint64_t)); + if (pOrderCheckInfo->tableUidList == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + extractOrderedTableUidList(pOrderCheckInfo, pStatus); + + uint64_t uid = pOrderCheckInfo->tableUidList[0]; + pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); + } else { + if (pStatus->pTableIter == NULL) { // it is the last block of a new file +// ASSERT(pOrderCheckInfo->currentIndex == taosHashGetSize(pStatus->pTableMap)); + + pOrderCheckInfo->currentIndex = 0; + uint64_t uid = pOrderCheckInfo->tableUidList[pOrderCheckInfo->currentIndex]; + pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); + + // the tableMap has already updated + if (pStatus->pTableIter == NULL) { + void* p = taosMemoryRealloc(pOrderCheckInfo->tableUidList, taosHashGetSize(pStatus->pTableMap)*sizeof(uint64_t)); + if (p == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + pOrderCheckInfo->tableUidList = p; + extractOrderedTableUidList(pOrderCheckInfo, pStatus); + + uid = pOrderCheckInfo->tableUidList[0]; + pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); + } + } + } + + return TSDB_CODE_SUCCESS; +} + +static bool moveToNextTable(SUidOrderCheckInfo *pOrderedCheckInfo, SReaderStatus* pStatus) { + pOrderedCheckInfo->currentIndex += 1; + if (pOrderedCheckInfo->currentIndex >= taosHashGetSize(pStatus->pTableMap)) { + pStatus->pTableIter = NULL; + return false; + } + + uint64_t uid = pOrderedCheckInfo->tableUidList[pOrderedCheckInfo->currentIndex]; + pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); + ASSERT(pStatus->pTableIter != NULL); + return true; +} + +static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { + SReaderStatus* pStatus = &pReader->status; + SLastBlockReader* pLastBlockReader = pStatus->fileIter.pLastBlockReader; + + SUidOrderCheckInfo *pOrderedCheckInfo = &pStatus->uidCheckInfo; + int32_t code = initOrderCheckInfo(pOrderedCheckInfo, pStatus); + if (code != TSDB_CODE_SUCCESS || (taosHashGetSize(pStatus->pTableMap) == 0)) { + return code; + } + + while(1) { + // load the last data block of current table + STableBlockScanInfo* pScanInfo = pStatus->pTableIter; + code = doLoadRelatedLastBlock(pLastBlockReader, pScanInfo, pReader); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + if (pLastBlockReader->currentBlockIndex != -1) { + initLastBlockReader(pLastBlockReader, pScanInfo->uid, &pScanInfo->indexInBlockL); + int32_t index = pScanInfo->indexInBlockL; + + if (index == INITIAL_ROW_INDEX_VAL || index == pLastBlockReader->lastBlockData.nRow) { + bool hasData = nextRowInLastBlock(pLastBlockReader, pScanInfo); + if (!hasData) { // current table does not have rows in last block, try next table + bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus); + if (!hasNexTable) { + return TSDB_CODE_SUCCESS; + } + continue; + } + } + } else { // no data in last block, try next table + bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus); + if (!hasNexTable) { + return TSDB_CODE_SUCCESS; + } + continue; + } + + code = doBuildDataBlock(pReader); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + if (pReader->pResBlock->info.rows > 0) { + return TSDB_CODE_SUCCESS; + } + + // current table is exhausted, let's try next table + bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus); + if (!hasNexTable) { + return TSDB_CODE_SUCCESS; + } + } +} + static int32_t doBuildDataBlock(STsdbReader* pReader) { + TSDBKEY key = {0}; int32_t code = TSDB_CODE_SUCCESS; + SBlock* pBlock = NULL; - SReaderStatus* pStatus = &pReader->status; - SDataBlockIter* pBlockIter = &pStatus->blockIter; + SReaderStatus* pStatus = &pReader->status; + SDataBlockIter* pBlockIter = &pStatus->blockIter; + STableBlockScanInfo* pScanInfo = NULL; + SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter); + SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader; + + if (pBlockInfo != NULL) { + pScanInfo = taosHashGet(pReader->status.pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid)); + } else { + pScanInfo = pReader->status.pTableIter; + } - SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter); - STableBlockScanInfo* pScanInfo = taosHashGet(pStatus->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid)); + if (pBlockInfo != NULL) { + pBlock = getCurrentBlock(pBlockIter); + } - SBlock* pBlock = getCurrentBlock(pBlockIter); + { + key = getCurrentKeyInBuf(pScanInfo, pReader); - TSDBKEY key = getCurrentKeyInBuf(pBlockIter, pReader); - if (fileBlockShouldLoad(pReader, pFBlock, pBlock, pScanInfo, key)) { + // load the last data block of current table + code = doLoadRelatedLastBlock(pLastBlockReader, pScanInfo, pReader); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + // note: the lastblock may be null here + initLastBlockReader(pLastBlockReader, pScanInfo->uid, &pScanInfo->indexInBlockL); + if (pScanInfo->indexInBlockL == INITIAL_ROW_INDEX_VAL || pScanInfo->indexInBlockL == pLastBlockReader->lastBlockData.nRow) { + bool hasData = nextRowInLastBlock(pLastBlockReader, pScanInfo); + } + } + + if (pBlockInfo == NULL) { // build data block from last data file + ASSERT(pBlockIter->numOfBlocks == 0); + code = buildComposedDataBlock(pReader); + } else if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, key, pLastBlockReader)) { tBlockDataReset(&pStatus->fileBlockData); - tBlockDataClearData(&pStatus->fileBlockData); - code = doLoadFileBlockData(pReader, pBlockIter, pScanInfo, &pStatus->fileBlockData); + code = tBlockDataInit(&pStatus->fileBlockData, pReader->suid, pScanInfo->uid, pReader->pSchema); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData); if (code != TSDB_CODE_SUCCESS) { return code; } // build composed data block - code = buildComposedDataBlock(pReader, pScanInfo); + code = buildComposedDataBlock(pReader); } else if (bufferDataInFileBlockGap(pReader->order, key, pBlock)) { // data in memory that are earlier than current file block // todo rows in buffer should be less than the file block in asc, greater than file block in desc int64_t endKey = (ASCENDING_TRAVERSE(pReader->order)) ? pBlock->minKey.ts : pBlock->maxKey.ts; code = buildDataBlockFromBuf(pReader, pScanInfo, endKey); - } else { // whole block is required, return it directly - SDataBlockInfo* pInfo = &pReader->pResBlock->info; - pInfo->rows = pBlock->nRow; - pInfo->uid = pScanInfo->uid; - pInfo->window = (STimeWindow){.skey = pBlock->minKey.ts, .ekey = pBlock->maxKey.ts}; - setComposedBlockFlag(pReader, false); - setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlock, pReader->order); + } else { + if (hasDataInLastBlock(pLastBlockReader) && !ASCENDING_TRAVERSE(pReader->order)) { + // only return the rows in last block + int64_t tsLast = getCurrentKeyInLastBlock(pLastBlockReader); + ASSERT (tsLast >= pBlock->maxKey.ts); + tBlockDataReset(&pReader->status.fileBlockData); + + code = buildComposedDataBlock(pReader); + } else { // whole block is required, return it directly + SDataBlockInfo* pInfo = &pReader->pResBlock->info; + pInfo->rows = pBlock->nRow; + pInfo->uid = pScanInfo->uid; + pInfo->window = (STimeWindow){.skey = pBlock->minKey.ts, .ekey = pBlock->maxKey.ts}; + setComposedBlockFlag(pReader, false); + setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlock->maxKey.ts, pReader->order); + } } return code; @@ -1890,20 +2566,29 @@ static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter) } static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBlockIter) { - int32_t numOfBlocks = 0; - int32_t code = moveToNextFile(pReader, &numOfBlocks); + SBlockNumber num = {0}; + + int32_t code = moveToNextFile(pReader, &num); if (code != TSDB_CODE_SUCCESS) { return code; } // all data files are consumed, try data in buffer - if (numOfBlocks == 0) { + if (num.numOfBlocks + num.numOfLastBlocks == 0) { pReader->status.loadFromFile = false; return code; } // initialize the block iterator for a new fileset - code = initBlockIterator(pReader, pBlockIter, numOfBlocks); + if (num.numOfBlocks > 0) { + code = initBlockIterator(pReader, pBlockIter, num.numOfBlocks); + } else { // no block data, only last block exists + tBlockDataReset(&pReader->status.fileBlockData); + resetDataBlockIterator(pBlockIter, pReader->order); + } + + SLastBlockReader* pLReader = pReader->status.fileIter.pLastBlockReader; + pLReader->currentBlockIndex = -1; // set the correct start position according to the query time window initBlockDumpInfo(pReader, pBlockIter); @@ -1921,14 +2606,47 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { SDataBlockIter* pBlockIter = &pReader->status.blockIter; - while (1) { - SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(&pReader->status.blockIter); - STableBlockScanInfo* pScanInfo = taosHashGet(pReader->status.pTableMap, &pFBlock->uid, sizeof(pFBlock->uid)); + if (pBlockIter->numOfBlocks == 0) { + _begin: + code = doLoadLastBlockSequentially(pReader); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + if (pReader->pResBlock->info.rows > 0) { + return TSDB_CODE_SUCCESS; + } + + // all data blocks are checked in this last block file, now let's try the next file + if (pReader->status.pTableIter == NULL) { + code = initForFirstBlockInFile(pReader, pBlockIter); + + // error happens or all the data files are completely checked + if ((code != TSDB_CODE_SUCCESS) || (pReader->status.loadFromFile == false)) { + return code; + } + + // this file does not have data files, let's start check the last block file if exists + if (pBlockIter->numOfBlocks == 0) { + goto _begin; + } + } + + code = doBuildDataBlock(pReader); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + if (pReader->pResBlock->info.rows > 0) { + return TSDB_CODE_SUCCESS; + } + } + while (1) { SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; if (fileBlockPartiallyRead(pDumpInfo, asc)) { // file data block is partially loaded - code = buildComposedDataBlock(pReader, pScanInfo); + code = buildComposedDataBlock(pReader); } else { // current block are exhausted, try the next file block if (pDumpInfo->allDumped) { @@ -1936,17 +2654,25 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { bool hasNext = blockIteratorNext(&pReader->status.blockIter); if (hasNext) { // check for the next block in the block accessed order list initBlockDumpInfo(pReader, pBlockIter); - } else { // data blocks in current file are exhausted, let's try the next file now + } else if (taosArrayGetSize(pReader->status.fileIter.pLastBlockReader->pBlockL) > 0) { // data blocks in current file are exhausted, let's try the next file now + tBlockDataReset(&pReader->status.fileBlockData); + resetDataBlockIterator(pBlockIter, pReader->order); + goto _begin; + } else { code = initForFirstBlockInFile(pReader, pBlockIter); // error happens or all the data files are completely checked if ((code != TSDB_CODE_SUCCESS) || (pReader->status.loadFromFile == false)) { return code; } + + // this file does not have blocks, let's start check the last block file + if (pBlockIter->numOfBlocks == 0) { + goto _begin; + } } } - // current block is not loaded yet, or data in buffer may overlap with the file block. code = doBuildDataBlock(pReader); } @@ -2014,39 +2740,6 @@ SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_ return (SVersionRange){.minVer = startVer, .maxVer = endVer}; } -// // todo not unref yet, since it is not support multi-group interpolation query -// static UNUSED_FUNC void changeQueryHandleForInterpQuery(STsdbReader* pHandle) { -// // filter the queried time stamp in the first place -// STsdbReader* pTsdbReadHandle = (STsdbReader*)pHandle; - -// // starts from the buffer in case of descending timestamp order check data blocks -// size_t numOfTables = taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo); - -// int32_t i = 0; -// while (i < numOfTables) { -// STableBlockScanInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, i); - -// // the first qualified table for interpolation query -// // if ((pTsdbReadHandle->window.skey <= pCheckInfo->pTableObj->lastKey) && -// // (pCheckInfo->pTableObj->lastKey != TSKEY_INITIAL_VAL)) { -// // break; -// // } - -// i++; -// } - -// // there are no data in all the tables -// if (i == numOfTables) { -// return; -// } - -// STableBlockScanInfo info = *(STableBlockScanInfo*)taosArrayGet(pTsdbReadHandle->pTableCheckInfo, i); -// taosArrayClear(pTsdbReadHandle->pTableCheckInfo); - -// info.lastKey = pTsdbReadHandle->window.skey; -// taosArrayPush(pTsdbReadHandle->pTableCheckInfo, &info); -// } - bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order) { ASSERT(pKey != NULL); if (pDelList == NULL) { @@ -2147,7 +2840,7 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32 return false; } -TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader) { +TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader) { if (!pIter->hasVal) { return NULL; } @@ -2195,7 +2888,7 @@ int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDe } // data exists but not valid - TSDBROW* pRow = getValidRow(pIter, pDelList, pReader); + TSDBROW* pRow = getValidMemRow(pIter, pDelList, pReader); if (pRow == NULL) { break; } @@ -2265,8 +2958,12 @@ static int32_t checkForNeighborFileBlock(STsdbReader* pReader, STableBlockScanIn // 3. load the neighbor block, and set it to be the currently accessed file data block tBlockDataReset(&pStatus->fileBlockData); - tBlockDataClearData(&pStatus->fileBlockData); - int32_t code = doLoadFileBlockData(pReader, pBlockIter, pScanInfo, &pStatus->fileBlockData); + int32_t code = tBlockDataInit(&pStatus->fileBlockData, pReader->suid, pFBlock->uid, pReader->pSchema); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -2315,7 +3012,21 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc return TSDB_CODE_SUCCESS; } -void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow, +int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts, SRowMerger* pMerger) { + while(nextRowInLastBlock(pLastBlockReader, pScanInfo)) { + int64_t next1 = getCurrentKeyInLastBlock(pLastBlockReader); + if (next1 == ts) { + TSDBROW fRow1 = tsdbRowFromBlockData(&pLastBlockReader->lastBlockData, *pLastBlockReader->rowIndex); + tRowMerge(pMerger, &fRow1); + } else { + break; + } + } + + return TSDB_CODE_SUCCESS; +} + +void doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow, STsdbReader* pReader, bool* freeTSRow) { TSDBROW* pNextRow = NULL; TSDBROW current = *pRow; @@ -2328,7 +3039,7 @@ void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDe *freeTSRow = false; return; } else { // has next point in mem/imem - pNextRow = getValidRow(pIter, pDelList, pReader); + pNextRow = getValidMemRow(pIter, pDelList, pReader); if (pNextRow == NULL) { *pTSRow = current.pTSRow; *freeTSRow = false; @@ -2394,9 +3105,10 @@ void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlo int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STSRow** pTSRow, int64_t endKey, bool* freeTSRow) { - TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader); - TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader); + TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader); + TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader); SArray* pDelList = pBlockScanInfo->delSkyline; + uint64_t uid = pBlockScanInfo->uid; // todo refactor bool asc = ASCENDING_TRAVERSE(pReader->order); @@ -2418,10 +3130,12 @@ int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pR TSDBKEY k = TSDBROW_KEY(pRow); TSDBKEY ik = TSDBROW_KEY(piRow); - if (ik.ts < k.ts) { // ik.ts < k.ts - doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow); - } else if (k.ts < ik.ts) { - doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow); + if (ik.ts != k.ts) { + if (((ik.ts < k.ts) && asc) || ((ik.ts > k.ts) && (!asc))) { // ik.ts < k.ts + doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow); + } else if (((k.ts < ik.ts) && asc) || ((k.ts > ik.ts) && (!asc))) { + doMergeMemTableMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow); + } } else { // ik.ts == k.ts doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, pTSRow); *freeTSRow = true; @@ -2431,12 +3145,12 @@ int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pR } if (pBlockScanInfo->iter.hasVal && pRow != NULL) { - doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow); + doMergeMemTableMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow); return TSDB_CODE_SUCCESS; } if (pBlockScanInfo->iiter.hasVal && piRow != NULL) { - doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow); + doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow); return TSDB_CODE_SUCCESS; } @@ -2487,7 +3201,7 @@ int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* return TSDB_CODE_SUCCESS; } -int32_t doAppendRowFromBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData, int32_t rowIndex) { +int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData, int32_t rowIndex) { int32_t i = 0, j = 0; int32_t outputRowIndex = pResBlock->info.rows; @@ -2564,7 +3278,7 @@ int32_t tsdbSetTableId(STsdbReader* pReader, int64_t uid) { ASSERT(pReader != NULL); taosHashClear(pReader->status.pTableMap); - STableBlockScanInfo info = {.lastKey = 0, .uid = uid}; + STableBlockScanInfo info = {.lastKey = 0, .uid = uid, .indexInBlockL = INITIAL_ROW_INDEX_VAL}; taosHashPut(pReader->status.pTableMap, &info.uid, sizeof(uint64_t), &info, sizeof(info)); return TDB_CODE_SUCCESS; } @@ -2585,32 +3299,6 @@ void* tsdbGetIvtIdx(SMeta* pMeta) { uint64_t getReaderMaxVersion(STsdbReader* pReader) { return pReader->verRange.maxVer; } -/** - * @brief Get all suids since suid - * - * @param pMeta - * @param suid return all suids in one vnode if suid is 0 - * @param list - * @return int32_t - */ -int32_t tsdbGetStbIdList(SMeta* pMeta, int64_t suid, SArray* list) { - SMStbCursor* pCur = metaOpenStbCursor(pMeta, suid); - if (!pCur) { - return TSDB_CODE_FAILED; - } - - while (1) { - tb_uid_t id = metaStbCursorNext(pCur); - if (id == 0) { - break; - } - - taosArrayPush(list, &id); - } - - metaCloseStbCursor(pCur); - return TSDB_CODE_SUCCESS; -} // ====================================== EXPOSED APIs ====================================== int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTableList, STsdbReader** ppReader, @@ -2641,6 +3329,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl pCond->order = TSDB_ORDER_ASC; } + // here we only need one more row, so the capacity is set to be ONE. code = tsdbReaderCreate(pVnode, pCond, &pReader->innerReader[0], 1, idstr); if (code != TSDB_CODE_SUCCESS) { goto _err; @@ -2660,10 +3349,10 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl } if (pCond->suid != 0) { - pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pReader->suid, -1); + pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pReader->suid, pCond->schemaVersion); } else if (taosArrayGetSize(pTableList) > 0) { STableKeyInfo* pKey = taosArrayGet(pTableList, 0); - pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pKey->uid, -1); + pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pKey->uid, pCond->schemaVersion); } int32_t numOfTables = taosArrayGetSize(pTableList); @@ -2684,8 +3373,8 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl if (pReader->type == TIMEWINDOW_RANGE_CONTAINED) { SDataBlockIter* pBlockIter = &pReader->status.blockIter; - initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader->order, pReader->idStr); - resetDataBlockIterator(&pReader->status.blockIter, pReader->order, pReader->status.pTableMap); + initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader); + resetDataBlockIterator(&pReader->status.blockIter, pReader->order); // no data in files, let's try buffer in memory if (pReader->status.fileIter.numOfFiles == 0) { @@ -2705,9 +3394,8 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl goto _err; } - initFilesetIterator(&pPrevReader->status.fileIter, pPrevReader->pReadSnap->fs.aDFileSet, pPrevReader->order, - pPrevReader->idStr); - resetDataBlockIterator(&pPrevReader->status.blockIter, pPrevReader->order, pReader->status.pTableMap); + initFilesetIterator(&pPrevReader->status.fileIter, pPrevReader->pReadSnap->fs.aDFileSet, pPrevReader); + resetDataBlockIterator(&pPrevReader->status.blockIter, pPrevReader->order); // no data in files, let's try buffer in memory if (pPrevReader->status.fileIter.numOfFiles == 0) { @@ -2746,7 +3434,7 @@ void tsdbReaderClose(STsdbReader* pReader) { } } taosMemoryFree(pSupInfo->buildBuf); - tBlockDataClear(&pReader->status.fileBlockData, true); + tBlockDataDestroy(&pReader->status.fileBlockData, true); cleanupDataBlockIterator(&pReader->status.blockIter); @@ -2758,15 +3446,25 @@ void tsdbReaderClose(STsdbReader* pReader) { tsdbDataFReaderClose(&pReader->pFileReader); } + taosMemoryFree(pReader->status.uidCheckInfo.tableUidList); + + SFilesetIter* pFilesetIter = &pReader->status.fileIter; + if (pFilesetIter->pLastBlockReader != NULL) { + tBlockDataDestroy(&pFilesetIter->pLastBlockReader->lastBlockData, true); + taosArrayDestroy(pFilesetIter->pLastBlockReader->pBlockL); + taosMemoryFree(pFilesetIter->pLastBlockReader); + } + SIOCostSummary* pCost = &pReader->cost; tsdbDebug("%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%" PRId64 " SMA-time:%.2f ms, fileBlocks:%" PRId64 ", fileBlocks-time:%.2f ms, " - "build in-memory-block-time:%.2f ms, STableBlockScanInfo size:%.2f Kb %s", - pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaData, pCost->smaLoadTime, - pCost->numOfBlocks, pCost->blockLoadTime, pCost->buildmemBlock, - numOfTables * sizeof(STableBlockScanInfo) / 1000.0, pReader->idStr); + "build in-memory-block-time:%.2f ms, lastBlocks:%" PRId64 + ", lastBlocks-time:%.2f ms, STableBlockScanInfo size:%.2f Kb %s", + pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaDataLoad, pCost->smaLoadTime, + pCost->numOfBlocks, pCost->blockLoadTime, pCost->buildmemBlock, pCost->lastBlockLoad, + pCost->lastBlockLoadTime, numOfTables * sizeof(STableBlockScanInfo) / 1000.0, pReader->idStr); taosMemoryFree(pReader->idStr); taosMemoryFree(pReader->pSchema); @@ -2883,7 +3581,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS SBlockLoadSuppInfo* pSup = &pReader->suppInfo; if (tBlockHasSma(pBlock)) { - code = tsdbReadBlockSma(pReader->pFileReader, pBlock, pSup->pColAgg, NULL); + code = tsdbReadBlockSma(pReader->pFileReader, pBlock, pSup->pColAgg); if (code != TSDB_CODE_SUCCESS) { tsdbDebug("vgId:%d, failed to load block SMA for uid %" PRIu64 ", code:%s, %s", 0, pFBlock->uid, tstrerror(code), pReader->idStr); @@ -2928,7 +3626,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS double elapsed = (taosGetTimestampUs() - stime) / 1000.0; pReader->cost.smaLoadTime += elapsed; - pReader->cost.smaData += 1; + pReader->cost.smaDataLoad += 1; *pBlockStatis = pSup->plist; @@ -2949,11 +3647,15 @@ static SArray* doRetrieveDataBlock(STsdbReader* pReader) { STableBlockScanInfo* pBlockScanInfo = taosHashGet(pStatus->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid)); tBlockDataReset(&pStatus->fileBlockData); - tBlockDataClearData(&pStatus->fileBlockData); - int32_t code = doLoadFileBlockData(pReader, &pStatus->blockIter, pBlockScanInfo, &pStatus->fileBlockData); + int32_t code = tBlockDataInit(&pStatus->fileBlockData, pReader->suid, pBlockScanInfo->uid, pReader->pSchema); if (code != TSDB_CODE_SUCCESS) { - tBlockDataClear(&pStatus->fileBlockData, 1); + terrno = code; + return NULL; + } + code = doLoadFileBlockData(pReader, &pStatus->blockIter, &pStatus->fileBlockData); + if (code != TSDB_CODE_SUCCESS) { + tBlockDataDestroy(&pStatus->fileBlockData, 1); terrno = code; return NULL; } @@ -2995,8 +3697,8 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { int32_t numOfTables = taosHashGetSize(pReader->status.pTableMap); tsdbDataFReaderClose(&pReader->pFileReader); - initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader->order, pReader->idStr); - resetDataBlockIterator(&pReader->status.blockIter, pReader->order, pReader->status.pTableMap); + initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader); + resetDataBlockIterator(&pReader->status.blockIter, pReader->order); resetDataBlockScanInfo(pReader->status.pTableMap); int32_t code = 0; @@ -3104,7 +3806,7 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader* pReader) { STbData* d = NULL; if (pReader->pTsdb->mem != NULL) { - tsdbGetTbDataFromMemTable(pReader->pReadSnap->pMem, pReader->suid, pBlockScanInfo->uid, &d); + d = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pMem, pReader->suid, pBlockScanInfo->uid); if (d != NULL) { rows += tsdbGetNRowsInTbData(d); } @@ -3112,7 +3814,7 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader* pReader) { STbData* di = NULL; if (pReader->pTsdb->imem != NULL) { - tsdbGetTbDataFromMemTable(pReader->pReadSnap->pIMem, pReader->suid, pBlockScanInfo->uid, &di); + di = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pIMem, pReader->suid, pBlockScanInfo->uid); if (di != NULL) { rows += tsdbGetNRowsInTbData(di); } diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index ea9c3e5313d509cd3f8476a2a33c3cc6344ea564..c8f3862071b3628fdefd26df58ea3cb01e80d302 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -15,8 +15,6 @@ #include "tsdb.h" -#define TSDB_FILE_DLMT ((uint32_t)0xF00AFA0F) - // SDelFWriter ==================================================== int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb) { int32_t code = 0; @@ -63,6 +61,7 @@ _err: int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync) { int32_t code = 0; SDelFWriter *pWriter = *ppWriter; + STsdb *pTsdb = pWriter->pTsdb; // sync if (sync && taosFsyncFile(pWriter->pWriteH) < 0) { @@ -76,47 +75,47 @@ int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync) { goto _err; } + for (int32_t iBuf = 0; iBuf < sizeof(pWriter->aBuf) / sizeof(uint8_t *); iBuf++) { + tFree(pWriter->aBuf[iBuf]); + } + taosMemoryFree(pWriter); + *ppWriter = NULL; return code; _err: - tsdbError("vgId:%d, failed to close del file writer since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); + tsdbError("vgId:%d, failed to close del file writer since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); return code; } -int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, uint8_t **ppBuf, SDelIdx *pDelIdx) { - int32_t code = 0; - uint8_t *pBuf = NULL; - int64_t size; - int64_t n; - SBlockDataHdr hdr = {.delimiter = TSDB_FILE_DLMT, .suid = pDelIdx->suid, .uid = pDelIdx->uid}; - - if (!ppBuf) ppBuf = &pBuf; +int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, SDelIdx *pDelIdx) { + int32_t code = 0; + int64_t size; + int64_t n; // prepare - size = sizeof(hdr); + size = sizeof(uint32_t); for (int32_t iDelData = 0; iDelData < taosArrayGetSize(aDelData); iDelData++) { size += tPutDelData(NULL, taosArrayGet(aDelData, iDelData)); } size += sizeof(TSCKSUM); // alloc - code = tRealloc(ppBuf, size); + code = tRealloc(&pWriter->aBuf[0], size); if (code) goto _err; // build n = 0; - *(SBlockDataHdr *)(*ppBuf) = hdr; - n += sizeof(hdr); + n += tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT); for (int32_t iDelData = 0; iDelData < taosArrayGetSize(aDelData); iDelData++) { - n += tPutDelData(*ppBuf + n, taosArrayGet(aDelData, iDelData)); + n += tPutDelData(pWriter->aBuf[0] + n, taosArrayGet(aDelData, iDelData)); } - taosCalcChecksumAppend(0, *ppBuf, size); + taosCalcChecksumAppend(0, pWriter->aBuf[0], size); ASSERT(n + sizeof(TSCKSUM) == size); // write - n = taosWriteFile(pWriter->pWriteH, *ppBuf, size); + n = taosWriteFile(pWriter->pWriteH, pWriter->aBuf[0], size); if (n < 0) { code = TAOS_SYSTEM_ERROR(errno); goto _err; @@ -129,48 +128,42 @@ int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, uint8_t **ppBuf pDelIdx->size = size; pWriter->fDel.size += size; - tFree(pBuf); return code; _err: tsdbError("vgId:%d, failed to write del data since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - tFree(pBuf); return code; } -int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx, uint8_t **ppBuf) { +int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx) { int32_t code = 0; int64_t size; int64_t n; - uint8_t *pBuf = NULL; SDelIdx *pDelIdx; - if (!ppBuf) ppBuf = &pBuf; - // prepare - size = 0; - size += tPutU32(NULL, TSDB_FILE_DLMT); + size = sizeof(uint32_t); for (int32_t iDelIdx = 0; iDelIdx < taosArrayGetSize(aDelIdx); iDelIdx++) { size += tPutDelIdx(NULL, taosArrayGet(aDelIdx, iDelIdx)); } size += sizeof(TSCKSUM); // alloc - code = tRealloc(ppBuf, size); + code = tRealloc(&pWriter->aBuf[0], size); if (code) goto _err; // build n = 0; - n += tPutU32(*ppBuf + n, TSDB_FILE_DLMT); + n += tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT); for (int32_t iDelIdx = 0; iDelIdx < taosArrayGetSize(aDelIdx); iDelIdx++) { - n += tPutDelIdx(*ppBuf + n, taosArrayGet(aDelIdx, iDelIdx)); + n += tPutDelIdx(pWriter->aBuf[0] + n, taosArrayGet(aDelIdx, iDelIdx)); } - taosCalcChecksumAppend(0, *ppBuf, size); + taosCalcChecksumAppend(0, pWriter->aBuf[0], size); ASSERT(n + sizeof(TSCKSUM) == size); // write - n = taosWriteFile(pWriter->pWriteH, *ppBuf, size); + n = taosWriteFile(pWriter->pWriteH, pWriter->aBuf[0], size); if (n < 0) { code = TAOS_SYSTEM_ERROR(errno); goto _err; @@ -180,12 +173,10 @@ int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx, uint8_t **ppBuf) pWriter->fDel.offset = pWriter->fDel.size; pWriter->fDel.size += size; - tFree(pBuf); return code; _err: tsdbError("vgId:%d, write del idx failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - tFree(pBuf); return code; } @@ -225,9 +216,11 @@ struct SDelFReader { STsdb *pTsdb; SDelFile fDel; TdFilePtr pReadH; + + uint8_t *aBuf[1]; }; -int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb, uint8_t **ppBuf) { +int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb) { int32_t code = 0; char fname[TSDB_FILENAME_LEN]; SDelFReader *pDelFReader; @@ -252,32 +245,6 @@ int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb goto _err; } -#if 0 - // load and check hdr if buffer is given - if (ppBuf) { - code = tRealloc(ppBuf, TSDB_FHDR_SIZE); - if (code) { - goto _err; - } - - n = taosReadFile(pDelFReader->pReadH, *ppBuf, TSDB_FHDR_SIZE); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } else if (n < TSDB_FHDR_SIZE) { - code = TSDB_CODE_FILE_CORRUPTED; - goto _err; - } - - if (!taosCheckChecksumWhole(*ppBuf, TSDB_FHDR_SIZE)) { - code = TSDB_CODE_FILE_CORRUPTED; - goto _err; - } - - // TODO: check the content - } -#endif - _exit: *ppReader = pDelFReader; return code; @@ -297,6 +264,9 @@ int32_t tsdbDelFReaderClose(SDelFReader **ppReader) { code = TAOS_SYSTEM_ERROR(errno); goto _exit; } + for (int32_t iBuf = 0; iBuf < sizeof(pReader->aBuf) / sizeof(uint8_t *); iBuf++) { + tFree(pReader->aBuf[iBuf]); + } taosMemoryFree(pReader); } *ppReader = NULL; @@ -305,16 +275,13 @@ _exit: return code; } -int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData, uint8_t **ppBuf) { - int32_t code = 0; - int64_t offset = pDelIdx->offset; - int64_t size = pDelIdx->size; - int64_t n; - uint8_t *pBuf = NULL; - SBlockDataHdr *pHdr; - SDelData *pDelData = &(SDelData){0}; +int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData) { + int32_t code = 0; + int64_t offset = pDelIdx->offset; + int64_t size = pDelIdx->size; + int64_t n; - if (!ppBuf) ppBuf = &pBuf; + taosArrayClear(aDelData); // seek if (taosLSeekFile(pReader->pReadH, offset, SEEK_SET) < 0) { @@ -323,11 +290,11 @@ int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData } // alloc - code = tRealloc(ppBuf, size); + code = tRealloc(&pReader->aBuf[0], size); if (code) goto _err; // read - n = taosReadFile(pReader->pReadH, *ppBuf, size); + n = taosReadFile(pReader->pReadH, pReader->aBuf[0], size); if (n < 0) { code = TAOS_SYSTEM_ERROR(errno); goto _err; @@ -337,23 +304,21 @@ int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData } // check - if (!taosCheckChecksumWhole(*ppBuf, size)) { + if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) { code = TSDB_CODE_FILE_CORRUPTED; goto _err; } // // decode n = 0; - pHdr = (SBlockDataHdr *)(*ppBuf + n); - ASSERT(pHdr->delimiter == TSDB_FILE_DLMT); - ASSERT(pHdr->suid == pDelIdx->suid); - ASSERT(pHdr->uid == pDelIdx->uid); - n += sizeof(*pHdr); - taosArrayClear(aDelData); + + uint32_t delimiter; + n += tGetU32(pReader->aBuf[0] + n, &delimiter); while (n < size - sizeof(TSCKSUM)) { - n += tGetDelData(*ppBuf + n, pDelData); + SDelData delData; + n += tGetDelData(pReader->aBuf[0] + n, &delData); - if (taosArrayPush(aDelData, pDelData) == NULL) { + if (taosArrayPush(aDelData, &delData) == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _err; } @@ -361,25 +326,20 @@ int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData ASSERT(n == size - sizeof(TSCKSUM)); - tFree(pBuf); return code; _err: tsdbError("vgId:%d, read del data failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code)); - tFree(pBuf); return code; } -int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx, uint8_t **ppBuf) { - int32_t code = 0; - int32_t n; - int64_t offset = pReader->fDel.offset; - int64_t size = pReader->fDel.size - offset; - uint32_t delimiter; - uint8_t *pBuf = NULL; - SDelIdx *pDelIdx = &(SDelIdx){0}; +int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx) { + int32_t code = 0; + int32_t n; + int64_t offset = pReader->fDel.offset; + int64_t size = pReader->fDel.size - offset; - if (!ppBuf) ppBuf = &pBuf; + taosArrayClear(aDelIdx); // seek if (taosLSeekFile(pReader->pReadH, offset, SEEK_SET) < 0) { @@ -388,11 +348,11 @@ int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx, uint8_t **ppBuf) { } // alloc - code = tRealloc(ppBuf, size); + code = tRealloc(&pReader->aBuf[0], size); if (code) goto _err; // read - n = taosReadFile(pReader->pReadH, *ppBuf, size); + n = taosReadFile(pReader->pReadH, pReader->aBuf[0], size); if (n < 0) { code = TAOS_SYSTEM_ERROR(errno); goto _err; @@ -402,21 +362,23 @@ int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx, uint8_t **ppBuf) { } // check - if (!taosCheckChecksumWhole(*ppBuf, size)) { + if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) { code = TSDB_CODE_FILE_CORRUPTED; goto _err; } // decode n = 0; - n += tGetU32(*ppBuf + n, &delimiter); + uint32_t delimiter; + n += tGetU32(pReader->aBuf[0] + n, &delimiter); ASSERT(delimiter == TSDB_FILE_DLMT); - taosArrayClear(aDelIdx); while (n < size - sizeof(TSCKSUM)) { - n += tGetDelIdx(*ppBuf + n, pDelIdx); + SDelIdx delIdx; - if (taosArrayPush(aDelIdx, pDelIdx) == NULL) { + n += tGetDelIdx(pReader->aBuf[0] + n, &delIdx); + + if (taosArrayPush(aDelIdx, &delIdx) == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _err; } @@ -424,12 +386,10 @@ int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx, uint8_t **ppBuf) { ASSERT(n == size - sizeof(TSCKSUM)); - tFree(pBuf); return code; _err: tsdbError("vgId:%d, read del idx failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code)); - tFree(pBuf); return code; } @@ -441,6 +401,8 @@ struct SDataFReader { TdFilePtr pDataFD; TdFilePtr pLastFD; TdFilePtr pSmaFD; + + uint8_t *aBuf[3]; }; int32_t tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pSet) { @@ -523,6 +485,10 @@ int32_t tsdbDataFReaderClose(SDataFReader **ppReader) { goto _err; } + for (int32_t iBuf = 0; iBuf < sizeof((*ppReader)->aBuf) / sizeof(uint8_t *); iBuf++) { + tFree((*ppReader)->aBuf[iBuf]); + } + taosMemoryFree(*ppReader); _exit: @@ -534,19 +500,20 @@ _err: return code; } -int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx, uint8_t **ppBuf) { - int32_t code = 0; - int64_t offset = pReader->pSet->pHeadF->offset; - int64_t size = pReader->pSet->pHeadF->size - offset; - uint8_t *pBuf = NULL; - int64_t n; - uint32_t delimiter; - SBlockIdx blockIdx; +int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx) { + int32_t code = 0; + int64_t offset = pReader->pSet->pHeadF->offset; + int64_t size = pReader->pSet->pHeadF->size - offset; + int64_t n; + uint32_t delimiter; - if (!ppBuf) ppBuf = &pBuf; + taosArrayClear(aBlockIdx); + if (size == 0) { + goto _exit; + } // alloc - code = tRealloc(ppBuf, size); + code = tRealloc(&pReader->aBuf[0], size); if (code) goto _err; // seek @@ -556,7 +523,7 @@ int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx, uint8_t **ppB } // read - n = taosReadFile(pReader->pHeadFD, *ppBuf, size); + n = taosReadFile(pReader->pHeadFD, pReader->aBuf[0], size); if (n < 0) { code = TAOS_SYSTEM_ERROR(errno); goto _err; @@ -566,19 +533,19 @@ int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx, uint8_t **ppB } // check - if (!taosCheckChecksumWhole(*ppBuf, size)) { + if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) { code = TSDB_CODE_FILE_CORRUPTED; goto _err; } // decode n = 0; - n = tGetU32(*ppBuf + n, &delimiter); + n = tGetU32(pReader->aBuf[0] + n, &delimiter); ASSERT(delimiter == TSDB_FILE_DLMT); - taosArrayClear(aBlockIdx); while (n < size - sizeof(TSCKSUM)) { - n += tGetBlockIdx(*ppBuf + n, &blockIdx); + SBlockIdx blockIdx; + n += tGetBlockIdx(pReader->aBuf[0] + n, &blockIdx); if (taosArrayPush(aBlockIdx, &blockIdx) == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; @@ -588,38 +555,38 @@ int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx, uint8_t **ppB ASSERT(n + sizeof(TSCKSUM) == size); - tFree(pBuf); +_exit: return code; _err: tsdbError("vgId:%d, read block idx failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code)); - tFree(pBuf); return code; } -int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *mBlock, uint8_t **ppBuf) { - int32_t code = 0; - int64_t offset = pBlockIdx->offset; - int64_t size = pBlockIdx->size; - uint8_t *pBuf = NULL; - int64_t n; - int64_t tn; - SBlockDataHdr hdr; +int32_t tsdbReadBlockL(SDataFReader *pReader, SArray *aBlockL) { + int32_t code = 0; + int64_t offset = pReader->pSet->pLastF->offset; + int64_t size = pReader->pSet->pLastF->size - offset; + int64_t n; + uint32_t delimiter; - if (!ppBuf) ppBuf = &pBuf; + taosArrayClear(aBlockL); + if (size == 0) { + goto _exit; + } // alloc - code = tRealloc(ppBuf, size); + code = tRealloc(&pReader->aBuf[0], size); if (code) goto _err; // seek - if (taosLSeekFile(pReader->pHeadFD, offset, SEEK_SET) < 0) { + if (taosLSeekFile(pReader->pLastFD, offset, SEEK_SET) < 0) { code = TAOS_SYSTEM_ERROR(errno); goto _err; } // read - n = taosReadFile(pReader->pHeadFD, *ppBuf, size); + n = taosReadFile(pReader->pLastFD, pReader->aBuf[0], size); if (n < 0) { code = TAOS_SYSTEM_ERROR(errno); goto _err; @@ -629,245 +596,116 @@ int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *mBl } // check - if (!taosCheckChecksumWhole(*ppBuf, size)) { + if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) { code = TSDB_CODE_FILE_CORRUPTED; goto _err; } // decode - hdr = *(SBlockDataHdr *)(*ppBuf); - ASSERT(hdr.delimiter == TSDB_FILE_DLMT); - ASSERT(hdr.suid == pBlockIdx->suid); - ASSERT(hdr.uid == pBlockIdx->uid); + n = 0; + n = tGetU32(pReader->aBuf[0] + n, &delimiter); + ASSERT(delimiter == TSDB_FILE_DLMT); - n = sizeof(hdr); - tn = tGetMapData(*ppBuf + n, mBlock); - if (tn < 0) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; + while (n < size - sizeof(TSCKSUM)) { + SBlockL blockl; + n += tGetBlockL(pReader->aBuf[0] + n, &blockl); + + if (taosArrayPush(aBlockL, &blockl) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } } - n += tn; + ASSERT(n + sizeof(TSCKSUM) == size); - tFree(pBuf); +_exit: return code; _err: - tsdbError("vgId:%d, read block failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code)); - tFree(pBuf); + tsdbError("vgId:%d read blockl failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code)); return code; } -static int32_t tsdbReadBlockDataKey(SBlockData *pBlockData, SSubBlock *pSubBlock, uint8_t *pBuf, uint8_t **ppBuf) { +int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *mBlock) { int32_t code = 0; - int64_t size = pSubBlock->szVersion + pSubBlock->szTSKEY + sizeof(TSCKSUM); + int64_t offset = pBlockIdx->offset; + int64_t size = pBlockIdx->size; int64_t n; + int64_t tn; - if (!taosCheckChecksumWhole(pBuf, size)) { - code = TSDB_CODE_FILE_CORRUPTED; - goto _err; - } - - code = tRealloc((uint8_t **)&pBlockData->aVersion, sizeof(int64_t) * pSubBlock->nRow); - if (code) goto _err; - code = tRealloc((uint8_t **)&pBlockData->aTSKEY, sizeof(TSKEY) * pSubBlock->nRow); + // alloc + code = tRealloc(&pReader->aBuf[0], size); if (code) goto _err; - if (pSubBlock->cmprAlg == NO_COMPRESSION) { - ASSERT(pSubBlock->szVersion == sizeof(int64_t) * pSubBlock->nRow); - ASSERT(pSubBlock->szTSKEY == sizeof(TSKEY) * pSubBlock->nRow); - - // VERSION - memcpy(pBlockData->aVersion, pBuf, pSubBlock->szVersion); - - // TSKEY - memcpy(pBlockData->aTSKEY, pBuf + pSubBlock->szVersion, pSubBlock->szTSKEY); - } else { - size = sizeof(int64_t) * pSubBlock->nRow + COMP_OVERFLOW_BYTES; - if (pSubBlock->cmprAlg == TWO_STAGE_COMP) { - code = tRealloc(ppBuf, size); - if (code) goto _err; - } - - // VERSION - n = tsDecompressBigint(pBuf, pSubBlock->szVersion, pSubBlock->nRow, (char *)pBlockData->aVersion, - sizeof(int64_t) * pSubBlock->nRow, pSubBlock->cmprAlg, *ppBuf, size); - if (n < 0) { - code = TSDB_CODE_COMPRESS_ERROR; - goto _err; - } - - // TSKEY - n = tsDecompressTimestamp(pBuf + pSubBlock->szVersion, pSubBlock->szTSKEY, pSubBlock->nRow, - (char *)pBlockData->aTSKEY, sizeof(TSKEY) * pSubBlock->nRow, pSubBlock->cmprAlg, *ppBuf, - size); - if (n < 0) { - code = TSDB_CODE_COMPRESS_ERROR; - goto _err; - } + // seek + if (taosLSeekFile(pReader->pHeadFD, offset, SEEK_SET) < 0) { + code = TAOS_SYSTEM_ERROR(errno); + goto _err; } - return code; - -_err: - return code; -} - -static int32_t tsdbReadColDataImpl(SSubBlock *pSubBlock, SBlockCol *pBlockCol, SColData *pColData, uint8_t *pBuf, - uint8_t **ppBuf) { - int32_t code = 0; - int64_t size; - int64_t n; - - if (!taosCheckChecksumWhole(pBuf, pBlockCol->szBitmap + pBlockCol->szOffset + pBlockCol->szValue + sizeof(TSCKSUM))) { + // read + n = taosReadFile(pReader->pHeadFD, pReader->aBuf[0], size); + if (n < 0) { + code = TAOS_SYSTEM_ERROR(errno); + goto _err; + } else if (n < size) { code = TSDB_CODE_FILE_CORRUPTED; goto _err; } - pColData->nVal = pSubBlock->nRow; - pColData->flag = pBlockCol->flag; - - // BITMAP - if (pBlockCol->flag != HAS_VALUE) { - ASSERT(pBlockCol->szBitmap); - - size = BIT2_SIZE(pColData->nVal); - code = tRealloc(&pColData->pBitMap, size); - if (code) goto _err; - - code = tRealloc(ppBuf, size + COMP_OVERFLOW_BYTES); - if (code) goto _err; - - n = tsDecompressTinyint(pBuf, pBlockCol->szBitmap, size, pColData->pBitMap, size, TWO_STAGE_COMP, *ppBuf, - size + COMP_OVERFLOW_BYTES); - if (n <= 0) { - code = TSDB_CODE_COMPRESS_ERROR; - goto _err; - } - - ASSERT(n == size); - } else { - ASSERT(pBlockCol->szBitmap == 0); - } - pBuf = pBuf + pBlockCol->szBitmap; - - // OFFSET - if (IS_VAR_DATA_TYPE(pColData->type)) { - ASSERT(pBlockCol->szOffset); - - size = sizeof(int32_t) * pColData->nVal; - code = tRealloc((uint8_t **)&pColData->aOffset, size); - if (code) goto _err; - - code = tRealloc(ppBuf, size + COMP_OVERFLOW_BYTES); - if (code) goto _err; - - n = tsDecompressInt(pBuf, pBlockCol->szOffset, pColData->nVal, (char *)pColData->aOffset, size, TWO_STAGE_COMP, - *ppBuf, size + COMP_OVERFLOW_BYTES); - if (n <= 0) { - code = TSDB_CODE_COMPRESS_ERROR; - goto _err; - } - - ASSERT(n == size); - } else { - ASSERT(pBlockCol->szOffset == 0); - } - pBuf = pBuf + pBlockCol->szOffset; - - // VALUE - pColData->nData = pBlockCol->szOrigin; - - code = tRealloc(&pColData->pData, pColData->nData); - if (code) goto _err; - - if (pSubBlock->cmprAlg == NO_COMPRESSION) { - memcpy(pColData->pData, pBuf, pColData->nData); - } else { - if (pSubBlock->cmprAlg == TWO_STAGE_COMP) { - code = tRealloc(ppBuf, pColData->nData + COMP_OVERFLOW_BYTES); - if (code) goto _err; - } - - n = tDataTypes[pBlockCol->type].decompFunc(pBuf, pBlockCol->szValue, pSubBlock->nRow, pColData->pData, - pColData->nData, pSubBlock->cmprAlg, *ppBuf, - pColData->nData + COMP_OVERFLOW_BYTES); - if (n < 0) { - code = TSDB_CODE_COMPRESS_ERROR; - goto _err; - } - - ASSERT(n == pColData->nData); - } - - return code; - -_err: - return code; -} - -static int32_t tsdbReadBlockCol(SSubBlock *pSubBlock, uint8_t *p, SArray *aBlockCol) { - int32_t code = 0; - int32_t n = 0; - SBlockCol blockCol; - SBlockCol *pBlockCol = &blockCol; - - if (!taosCheckChecksumWhole(p, pSubBlock->szBlockCol + sizeof(TSCKSUM))) { + // check + if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) { code = TSDB_CODE_FILE_CORRUPTED; goto _err; } - n += sizeof(SBlockDataHdr); - while (n < pSubBlock->szBlockCol) { - n += tGetBlockCol(p + n, pBlockCol); + // decode + n = 0; - if (taosArrayPush(aBlockCol, pBlockCol) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } - } + uint32_t delimiter; + n += tGetU32(pReader->aBuf[0] + n, &delimiter); + ASSERT(delimiter == TSDB_FILE_DLMT); - ASSERT(n == pSubBlock->szBlockCol); + tn = tGetMapData(pReader->aBuf[0] + n, mBlock); + if (tn < 0) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + n += tn; + ASSERT(n + sizeof(TSCKSUM) == size); return code; _err: + tsdbError("vgId:%d, read block failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code)); return code; } -static int32_t tsdbReadSubColData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *pBlock, int32_t iSubBlock, - int16_t *aColId, int32_t nCol, SBlockData *pBlockData, uint8_t **ppBuf1, - uint8_t **ppBuf2) { - TdFilePtr pFD = pBlock->last ? pReader->pLastFD : pReader->pDataFD; - SSubBlock *pSubBlock = &pBlock->aSubBlock[iSubBlock]; - SArray *aBlockCol = NULL; - int32_t code = 0; - int64_t offset; - int64_t size; - int64_t n; +int32_t tsdbReadBlockSma(SDataFReader *pReader, SBlock *pBlock, SArray *aColumnDataAgg) { + int32_t code = 0; + SSmaInfo *pSmaInfo = &pBlock->smaInfo; - tBlockDataReset(pBlockData); - pBlockData->nRow = pSubBlock->nRow; + ASSERT(pSmaInfo->size > 0); - // TSDBKEY and SBlockCol - if (nCol == 1) { - offset = pSubBlock->offset + pSubBlock->szBlockCol + sizeof(TSCKSUM); - size = pSubBlock->szVersion + pSubBlock->szTSKEY + sizeof(TSCKSUM); - } else { - offset = pSubBlock->offset; - size = pSubBlock->szBlockCol + sizeof(TSCKSUM) + pSubBlock->szVersion + pSubBlock->szTSKEY + sizeof(TSCKSUM); - } + taosArrayClear(aColumnDataAgg); - code = tRealloc(ppBuf1, size); + // alloc + int32_t size = pSmaInfo->size + sizeof(TSCKSUM); + code = tRealloc(&pReader->aBuf[0], size); if (code) goto _err; - n = taosLSeekFile(pFD, offset, SEEK_SET); + // seek + int64_t n = taosLSeekFile(pReader->pSmaFD, pSmaInfo->offset, SEEK_SET); if (n < 0) { code = TAOS_SYSTEM_ERROR(errno); goto _err; + } else if (n < pSmaInfo->offset) { + code = TSDB_CODE_FILE_CORRUPTED; + goto _err; } - n = taosReadFile(pFD, *ppBuf1, size); + // read + n = taosReadFile(pReader->pSmaFD, pReader->aBuf[0], size); if (n < 0) { code = TAOS_SYSTEM_ERROR(errno); goto _err; @@ -876,337 +714,203 @@ static int32_t tsdbReadSubColData(SDataFReader *pReader, SBlockIdx *pBlockIdx, S goto _err; } - if (nCol == 1) { - code = tsdbReadBlockDataKey(pBlockData, pSubBlock, *ppBuf1, ppBuf2); - if (code) goto _err; - - goto _exit; - } else { - aBlockCol = taosArrayInit(0, sizeof(SBlockCol)); - if (aBlockCol == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } - - code = tsdbReadBlockCol(pSubBlock, *ppBuf1, aBlockCol); - if (code) goto _err; - - code = tsdbReadBlockDataKey(pBlockData, pSubBlock, *ppBuf1 + pSubBlock->szBlockCol + sizeof(TSCKSUM), ppBuf2); - if (code) goto _err; + // check + if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) { + code = TSDB_CODE_FILE_CORRUPTED; + goto _err; } - for (int32_t iCol = 1; iCol < nCol; iCol++) { - void *p = taosArraySearch(aBlockCol, &(SBlockCol){.cid = aColId[iCol]}, tBlockColCmprFn, TD_EQ); - - if (p) { - SBlockCol *pBlockCol = (SBlockCol *)p; - SColData *pColData; - - ASSERT(pBlockCol->flag && pBlockCol->flag != HAS_NONE); - - code = tBlockDataAddColData(pBlockData, taosArrayGetSize(pBlockData->aIdx), &pColData); - if (code) goto _err; - - tColDataInit(pColData, pBlockCol->cid, pBlockCol->type, pBlockCol->smaOn); - if (pBlockCol->flag == HAS_NULL) { - for (int32_t iRow = 0; iRow < pSubBlock->nRow; iRow++) { - code = tColDataAppendValue(pColData, &COL_VAL_NULL(pBlockCol->cid, pBlockCol->type)); - if (code) goto _err; - } - } else { - offset = pSubBlock->offset + pSubBlock->szBlockCol + sizeof(TSCKSUM) + pSubBlock->szVersion + - pSubBlock->szTSKEY + sizeof(TSCKSUM) + pBlockCol->offset; - size = pBlockCol->szBitmap + pBlockCol->szOffset + pBlockCol->szValue + sizeof(TSCKSUM); - - code = tRealloc(ppBuf1, size); - if (code) goto _err; - - // seek - n = taosLSeekFile(pFD, offset, SEEK_SET); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - // read - n = taosReadFile(pFD, *ppBuf1, size); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } else if (n < size) { - code = TSDB_CODE_FILE_CORRUPTED; - goto _err; - } + // decode + n = 0; + while (n < pSmaInfo->size) { + SColumnDataAgg sma; - code = tsdbReadColDataImpl(pSubBlock, pBlockCol, pColData, *ppBuf1, ppBuf2); - if (code) goto _err; - } + n += tGetColumnDataAgg(pReader->aBuf[0] + n, &sma); + if (taosArrayPush(aColumnDataAgg, &sma) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; } } -_exit: - taosArrayDestroy(aBlockCol); return code; _err: - taosArrayDestroy(aBlockCol); + tsdbError("vgId:%d tsdb read block sma failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code)); return code; } -int32_t tsdbReadColData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *pBlock, int16_t *aColId, int32_t nCol, - SBlockData *pBlockData, uint8_t **ppBuf1, uint8_t **ppBuf2) { - int32_t code = 0; - uint8_t *pBuf1 = NULL; - uint8_t *pBuf2 = NULL; +static int32_t tsdbReadBlockDataImpl(SDataFReader *pReader, SBlockInfo *pBlkInfo, int8_t fromLast, + SBlockData *pBlockData) { + int32_t code = 0; - ASSERT(aColId[0] == PRIMARYKEY_TIMESTAMP_COL_ID); + tBlockDataClear(pBlockData); - if (!ppBuf1) ppBuf1 = &pBuf1; - if (!ppBuf2) ppBuf2 = &pBuf2; + TdFilePtr pFD = fromLast ? pReader->pLastFD : pReader->pDataFD; - code = tsdbReadSubColData(pReader, pBlockIdx, pBlock, 0, aColId, nCol, pBlockData, ppBuf1, ppBuf2); + // uid + version + tskey + code = tsdbReadAndCheck(pFD, pBlkInfo->offset, &pReader->aBuf[0], pBlkInfo->szKey, 1); if (code) goto _err; + SDiskDataHdr hdr; + uint8_t *p = pReader->aBuf[0] + tGetDiskDataHdr(pReader->aBuf[0], &hdr); - if (pBlock->nSubBlock > 1) { - SBlockData *pBlockData1 = &(SBlockData){0}; - SBlockData *pBlockData2 = &(SBlockData){0}; - - tBlockDataInit(pBlockData1); - tBlockDataInit(pBlockData2); - for (int32_t iSubBlock = 1; iSubBlock < pBlock->nSubBlock; iSubBlock++) { - code = tsdbReadSubColData(pReader, pBlockIdx, pBlock, iSubBlock, aColId, nCol, pBlockData1, ppBuf1, ppBuf2); - if (code) goto _err; + ASSERT(hdr.delimiter == TSDB_FILE_DLMT); + ASSERT(pBlockData->suid == hdr.suid); + ASSERT(pBlockData->uid == hdr.uid); - code = tBlockDataCopy(pBlockData, pBlockData2); - if (code) { - tBlockDataClear(pBlockData1, 1); - tBlockDataClear(pBlockData2, 1); - goto _err; - } + pBlockData->nRow = hdr.nRow; - code = tBlockDataMerge(pBlockData1, pBlockData2, pBlockData); - if (code) { - tBlockDataClear(pBlockData1, 1); - tBlockDataClear(pBlockData2, 1); - goto _err; - } - } - - tBlockDataClear(pBlockData1, 1); - tBlockDataClear(pBlockData2, 1); + // uid + if (hdr.uid == 0) { + ASSERT(hdr.szUid); + code = tsdbDecmprData(p, hdr.szUid, TSDB_DATA_TYPE_BIGINT, hdr.cmprAlg, (uint8_t **)&pBlockData->aUid, + sizeof(int64_t) * hdr.nRow, &pReader->aBuf[1]); + if (code) goto _err; + } else { + ASSERT(!hdr.szUid); } + p += hdr.szUid; - tFree(pBuf1); - tFree(pBuf2); - return code; - -_err: - tsdbError("vgId:%d, tsdb read col data failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code)); - tFree(pBuf1); - tFree(pBuf2); - return code; -} - -static int32_t tsdbReadSubBlockData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *pBlock, int32_t iSubBlock, - SBlockData *pBlockData, uint8_t **ppBuf1, uint8_t **ppBuf2) { - int32_t code = 0; - uint8_t *p; - int64_t size; - int64_t n; - TdFilePtr pFD = pBlock->last ? pReader->pLastFD : pReader->pDataFD; - SSubBlock *pSubBlock = &pBlock->aSubBlock[iSubBlock]; - SArray *aBlockCol = NULL; - - tBlockDataReset(pBlockData); + // version + code = tsdbDecmprData(p, hdr.szVer, TSDB_DATA_TYPE_BIGINT, hdr.cmprAlg, (uint8_t **)&pBlockData->aVersion, + sizeof(int64_t) * hdr.nRow, &pReader->aBuf[1]); + if (code) goto _err; + p += hdr.szVer; - // realloc - code = tRealloc(ppBuf1, pSubBlock->szBlock); + // TSKEY + code = tsdbDecmprData(p, hdr.szKey, TSDB_DATA_TYPE_TIMESTAMP, hdr.cmprAlg, (uint8_t **)&pBlockData->aTSKEY, + sizeof(TSKEY) * hdr.nRow, &pReader->aBuf[1]); if (code) goto _err; + p += hdr.szKey; - // seek - n = taosLSeekFile(pFD, pSubBlock->offset, SEEK_SET); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } + ASSERT(p - pReader->aBuf[0] == pBlkInfo->szKey - sizeof(TSCKSUM)); - // read - n = taosReadFile(pFD, *ppBuf1, pSubBlock->szBlock); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } else if (n < pSubBlock->szBlock) { - code = TSDB_CODE_FILE_CORRUPTED; - goto _err; - } + // read and decode columns + if (taosArrayGetSize(pBlockData->aIdx) == 0) goto _exit; - pBlockData->nRow = pSubBlock->nRow; + if (hdr.szBlkCol > 0) { + int64_t offset = pBlkInfo->offset + pBlkInfo->szKey; + code = tsdbReadAndCheck(pFD, offset, &pReader->aBuf[0], hdr.szBlkCol + sizeof(TSCKSUM), 1); + if (code) goto _err; + } - // TSDBKEY - p = *ppBuf1 + pSubBlock->szBlockCol + sizeof(TSCKSUM); - code = tsdbReadBlockDataKey(pBlockData, pSubBlock, p, ppBuf2); - if (code) goto _err; + SBlockCol blockCol = {.cid = 0}; + SBlockCol *pBlockCol = &blockCol; + int32_t n = 0; - // COLUMNS - aBlockCol = taosArrayInit(0, sizeof(SBlockCol)); - if (aBlockCol == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } + for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) { + SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData); - code = tsdbReadBlockCol(pSubBlock, *ppBuf1, aBlockCol); - if (code) goto _err; + while (pBlockCol && pBlockCol->cid < pColData->cid) { + if (n < hdr.szBlkCol) { + n += tGetBlockCol(pReader->aBuf[0] + n, pBlockCol); + } else { + ASSERT(n == hdr.szBlkCol); + pBlockCol = NULL; + } + } - for (int32_t iBlockCol = 0; iBlockCol < taosArrayGetSize(aBlockCol); iBlockCol++) { - SColData *pColData; - SBlockCol *pBlockCol = (SBlockCol *)taosArrayGet(aBlockCol, iBlockCol); + if (pBlockCol == NULL || pBlockCol->cid > pColData->cid) { + // add a lot of NONE + for (int32_t iRow = 0; iRow < hdr.nRow; iRow++) { + code = tColDataAppendValue(pColData, &COL_VAL_NONE(pColData->cid, pColData->type)); + if (code) goto _err; + } + } else { + ASSERT(pBlockCol->type == pColData->type); + ASSERT(pBlockCol->flag && pBlockCol->flag != HAS_NONE); - ASSERT(pBlockCol->flag && pBlockCol->flag != HAS_NONE); + if (pBlockCol->flag == HAS_NULL) { + // add a lot of NULL + for (int32_t iRow = 0; iRow < hdr.nRow; iRow++) { + code = tColDataAppendValue(pColData, &COL_VAL_NULL(pBlockCol->cid, pBlockCol->type)); + if (code) goto _err; + } + } else { + // decode from binary + int64_t offset = pBlkInfo->offset + pBlkInfo->szKey + hdr.szBlkCol + sizeof(TSCKSUM) + pBlockCol->offset; + int32_t size = pBlockCol->szBitmap + pBlockCol->szOffset + pBlockCol->szValue + sizeof(TSCKSUM); - code = tBlockDataAddColData(pBlockData, iBlockCol, &pColData); - if (code) goto _err; + code = tsdbReadAndCheck(pFD, offset, &pReader->aBuf[1], size, 0); + if (code) goto _err; - tColDataInit(pColData, pBlockCol->cid, pBlockCol->type, pBlockCol->smaOn); - if (pBlockCol->flag == HAS_NULL) { - for (int32_t iRow = 0; iRow < pSubBlock->nRow; iRow++) { - code = tColDataAppendValue(pColData, &COL_VAL_NULL(pBlockCol->cid, pBlockCol->type)); + code = tsdbDecmprColData(pReader->aBuf[1], pBlockCol, hdr.cmprAlg, hdr.nRow, pColData, &pReader->aBuf[2]); if (code) goto _err; } - } else { - p = *ppBuf1 + pSubBlock->szBlockCol + sizeof(TSCKSUM) + pSubBlock->szVersion + pSubBlock->szTSKEY + - sizeof(TSCKSUM) + pBlockCol->offset; - code = tsdbReadColDataImpl(pSubBlock, pBlockCol, pColData, p, ppBuf2); - if (code) goto _err; } } - taosArrayDestroy(aBlockCol); +_exit: return code; _err: - tsdbError("vgId:%d, tsdb read sub block data failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code)); - taosArrayDestroy(aBlockCol); + tsdbError("vgId:%d tsdb read block data impl failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code)); return code; } -int32_t tsdbReadBlockData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *pBlock, SBlockData *pBlockData, - uint8_t **ppBuf1, uint8_t **ppBuf2) { - int32_t code = 0; - TdFilePtr pFD = pBlock->last ? pReader->pLastFD : pReader->pDataFD; - uint8_t *pBuf1 = NULL; - uint8_t *pBuf2 = NULL; - int32_t iSubBlock; - - if (!ppBuf1) ppBuf1 = &pBuf1; - if (!ppBuf2) ppBuf2 = &pBuf2; +int32_t tsdbReadDataBlock(SDataFReader *pReader, SBlock *pBlock, SBlockData *pBlockData) { + int32_t code = 0; - // read the first sub-block - iSubBlock = 0; - code = tsdbReadSubBlockData(pReader, pBlockIdx, pBlock, iSubBlock, pBlockData, ppBuf1, ppBuf2); + code = tsdbReadBlockDataImpl(pReader, &pBlock->aSubBlock[0], 0, pBlockData); if (code) goto _err; - // read remain block data and do merg if (pBlock->nSubBlock > 1) { - SBlockData *pBlockData1 = &(SBlockData){0}; - SBlockData *pBlockData2 = &(SBlockData){0}; + SBlockData bData1; + SBlockData bData2; - tBlockDataInit(pBlockData1); - tBlockDataInit(pBlockData2); - for (iSubBlock = 1; iSubBlock < pBlock->nSubBlock; iSubBlock++) { - code = tsdbReadSubBlockData(pReader, pBlockIdx, pBlock, iSubBlock, pBlockData1, ppBuf1, ppBuf2); + // create + code = tBlockDataCreate(&bData1); + if (code) goto _err; + code = tBlockDataCreate(&bData2); + if (code) goto _err; + + // init + tBlockDataInitEx(&bData1, pBlockData); + tBlockDataInitEx(&bData2, pBlockData); + + for (int32_t iSubBlock = 1; iSubBlock < pBlock->nSubBlock; iSubBlock++) { + code = tsdbReadBlockDataImpl(pReader, &pBlock->aSubBlock[iSubBlock], 0, &bData1); if (code) { - tBlockDataClear(pBlockData1, 1); - tBlockDataClear(pBlockData2, 1); + tBlockDataDestroy(&bData1, 1); + tBlockDataDestroy(&bData2, 1); goto _err; } - code = tBlockDataCopy(pBlockData, pBlockData2); + code = tBlockDataCopy(pBlockData, &bData2); if (code) { - tBlockDataClear(pBlockData1, 1); - tBlockDataClear(pBlockData2, 1); + tBlockDataDestroy(&bData1, 1); + tBlockDataDestroy(&bData2, 1); goto _err; } - // merge two block data - code = tBlockDataMerge(pBlockData1, pBlockData2, pBlockData); + code = tBlockDataMerge(&bData1, &bData2, pBlockData); if (code) { - tBlockDataClear(pBlockData1, 1); - tBlockDataClear(pBlockData2, 1); + tBlockDataDestroy(&bData1, 1); + tBlockDataDestroy(&bData2, 1); goto _err; } } - tBlockDataClear(pBlockData1, 1); - tBlockDataClear(pBlockData2, 1); + tBlockDataDestroy(&bData1, 1); + tBlockDataDestroy(&bData2, 1); } - ASSERT(pBlock->nRow == pBlockData->nRow); - ASSERT(tsdbKeyCmprFn(&pBlock->minKey, &TSDBROW_KEY(&tBlockDataFirstRow(pBlockData))) == 0); - ASSERT(tsdbKeyCmprFn(&pBlock->maxKey, &TSDBROW_KEY(&tBlockDataLastRow(pBlockData))) == 0); - - if (pBuf1) tFree(pBuf1); - if (pBuf2) tFree(pBuf2); return code; _err: - tsdbError("vgId:%d, tsdb read block data failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code)); - if (pBuf1) tFree(pBuf1); - if (pBuf2) tFree(pBuf2); + tsdbError("vgId:%d tsdb read data block failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code)); return code; } -int32_t tsdbReadBlockSma(SDataFReader *pReader, SBlock *pBlock, SArray *aColumnDataAgg, uint8_t **ppBuf) { - int32_t code = 0; - TdFilePtr pFD = pReader->pSmaFD; - int64_t offset = pBlock->aSubBlock[0].sOffset; - int64_t size = pBlock->aSubBlock[0].nSma * sizeof(SColumnDataAgg) + sizeof(TSCKSUM); - uint8_t *pBuf = NULL; - int64_t n; - - ASSERT(tBlockHasSma(pBlock)); +int32_t tsdbReadLastBlock(SDataFReader *pReader, SBlockL *pBlockL, SBlockData *pBlockData) { + int32_t code = 0; - if (!ppBuf) ppBuf = &pBuf; - code = tRealloc(ppBuf, size); + code = tsdbReadBlockDataImpl(pReader, &pBlockL->bInfo, 1, pBlockData); if (code) goto _err; - // lseek - n = taosLSeekFile(pFD, offset, SEEK_SET); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } - - // read - n = taosReadFile(pFD, *ppBuf, size); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } else if (n < size) { - code = TSDB_CODE_FILE_CORRUPTED; - goto _err; - } - - // check - if (!taosCheckChecksumWhole(*ppBuf, size)) { - code = TSDB_CODE_FILE_CORRUPTED; - goto _err; - } - - // decode - taosArrayClear(aColumnDataAgg); - for (int32_t iSma = 0; iSma < pBlock->aSubBlock[0].nSma; iSma++) { - if (taosArrayPush(aColumnDataAgg, &((SColumnDataAgg *)(*ppBuf))[iSma]) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } - } - - tFree(pBuf); return code; _err: - tsdbError("vgId:%d, read block sma failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code)); - tFree(pBuf); + tsdbError("vgId:%d tsdb read last block failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code)); return code; } @@ -1225,6 +929,7 @@ int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pS code = TSDB_CODE_OUT_OF_MEMORY; goto _err; } + if (code) goto _err; pWriter->pTsdb = pTsdb; pWriter->wSet = (SDFileSet){.diskId = pSet->diskId, .fid = pSet->fid, @@ -1357,10 +1062,11 @@ _err: int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, int8_t sync) { int32_t code = 0; - STsdb *pTsdb = (*ppWriter)->pTsdb; + STsdb *pTsdb = NULL; if (*ppWriter == NULL) goto _exit; + pTsdb = (*ppWriter)->pTsdb; if (sync) { if (taosFsyncFile((*ppWriter)->pHeadFD) < 0) { code = TAOS_SYSTEM_ERROR(errno); @@ -1403,6 +1109,9 @@ int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, int8_t sync) { goto _err; } + for (int32_t iBuf = 0; iBuf < sizeof((*ppWriter)->aBuf) / sizeof(uint8_t *); iBuf++) { + tFree((*ppWriter)->aBuf[iBuf]); + } taosMemoryFree(*ppWriter); _exit: *ppWriter = NULL; @@ -1493,38 +1202,41 @@ _err: return code; } -int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx, uint8_t **ppBuf) { +int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx) { int32_t code = 0; SHeadFile *pHeadFile = &pWriter->fHead; - uint8_t *pBuf = NULL; - int64_t size; + int64_t size = 0; int64_t n; - if (!ppBuf) ppBuf = &pBuf; + // check + if (taosArrayGetSize(aBlockIdx) == 0) { + pHeadFile->offset = pHeadFile->size; + goto _exit; + } // prepare - size = tPutU32(NULL, TSDB_FILE_DLMT); + size = sizeof(uint32_t); for (int32_t iBlockIdx = 0; iBlockIdx < taosArrayGetSize(aBlockIdx); iBlockIdx++) { size += tPutBlockIdx(NULL, taosArrayGet(aBlockIdx, iBlockIdx)); } size += sizeof(TSCKSUM); // alloc - code = tRealloc(ppBuf, size); + code = tRealloc(&pWriter->aBuf[0], size); if (code) goto _err; // build n = 0; - n = tPutU32(*ppBuf + n, TSDB_FILE_DLMT); + n = tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT); for (int32_t iBlockIdx = 0; iBlockIdx < taosArrayGetSize(aBlockIdx); iBlockIdx++) { - n += tPutBlockIdx(*ppBuf + n, taosArrayGet(aBlockIdx, iBlockIdx)); + n += tPutBlockIdx(pWriter->aBuf[0] + n, taosArrayGet(aBlockIdx, iBlockIdx)); } - taosCalcChecksumAppend(0, *ppBuf, size); + taosCalcChecksumAppend(0, pWriter->aBuf[0], size); ASSERT(n + sizeof(TSCKSUM) == size); // write - n = taosWriteFile(pWriter->pHeadFD, *ppBuf, size); + n = taosWriteFile(pWriter->pHeadFD, pWriter->aBuf[0], size); if (n < 0) { code = TAOS_SYSTEM_ERROR(errno); goto _err; @@ -1534,44 +1246,39 @@ int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx, uint8_t **pp pHeadFile->offset = pHeadFile->size; pHeadFile->size += size; - tFree(pBuf); +_exit: + tsdbTrace("vgId:%d write block idx, offset:%" PRId64 " size:%" PRId64 " nBlockIdx:%d", TD_VID(pWriter->pTsdb->pVnode), + pHeadFile->offset, size, taosArrayGetSize(aBlockIdx)); return code; _err: tsdbError("vgId:%d, write block idx failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - tFree(pBuf); return code; } -int32_t tsdbWriteBlock(SDataFWriter *pWriter, SMapData *mBlock, uint8_t **ppBuf, SBlockIdx *pBlockIdx) { - int32_t code = 0; - SHeadFile *pHeadFile = &pWriter->fHead; - SBlockDataHdr hdr = {.delimiter = TSDB_FILE_DLMT, .suid = pBlockIdx->suid, .uid = pBlockIdx->uid}; - uint8_t *pBuf = NULL; - int64_t size; - int64_t n; +int32_t tsdbWriteBlock(SDataFWriter *pWriter, SMapData *mBlock, SBlockIdx *pBlockIdx) { + int32_t code = 0; + SHeadFile *pHeadFile = &pWriter->fHead; + int64_t size; + int64_t n; ASSERT(mBlock->nItem > 0); - // prepare - size = sizeof(SBlockDataHdr) + tPutMapData(NULL, mBlock) + sizeof(TSCKSUM); - // alloc - if (!ppBuf) ppBuf = &pBuf; - code = tRealloc(ppBuf, size); + size = sizeof(uint32_t) + tPutMapData(NULL, mBlock) + sizeof(TSCKSUM); + code = tRealloc(&pWriter->aBuf[0], size); if (code) goto _err; // build n = 0; - *(SBlockDataHdr *)(*ppBuf) = hdr; - n += sizeof(hdr); - n += tPutMapData(*ppBuf + n, mBlock); - taosCalcChecksumAppend(0, *ppBuf, size); + n += tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT); + n += tPutMapData(pWriter->aBuf[0] + n, mBlock); + taosCalcChecksumAppend(0, pWriter->aBuf[0], size); ASSERT(n + sizeof(TSCKSUM) == size); // write - n = taosWriteFile(pWriter->pHeadFD, *ppBuf, size); + n = taosWriteFile(pWriter->pHeadFD, pWriter->aBuf[0], size); if (n < 0) { code = TAOS_SYSTEM_ERROR(errno); goto _err; @@ -1582,17 +1289,71 @@ int32_t tsdbWriteBlock(SDataFWriter *pWriter, SMapData *mBlock, uint8_t **ppBuf, pBlockIdx->size = size; pHeadFile->size += size; - tFree(pBuf); - tsdbTrace("vgId:%d, write block, offset:%" PRId64 " size:%" PRId64, TD_VID(pWriter->pTsdb->pVnode), pBlockIdx->offset, - pBlockIdx->size); + tsdbTrace("vgId:%d, write block, file ID:%d commit ID:%d suid:%" PRId64 " uid:%" PRId64 " offset:%" PRId64 + " size:%" PRId64 " nItem:%d", + TD_VID(pWriter->pTsdb->pVnode), pWriter->wSet.fid, pHeadFile->commitID, pBlockIdx->suid, pBlockIdx->uid, + pBlockIdx->offset, pBlockIdx->size, mBlock->nItem); return code; _err: - tFree(pBuf); tsdbError("vgId:%d, write block failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); return code; } +int32_t tsdbWriteBlockL(SDataFWriter *pWriter, SArray *aBlockL) { + int32_t code = 0; + SLastFile *pLastFile = &pWriter->fLast; + int64_t size; + int64_t n; + + // check + if (taosArrayGetSize(aBlockL) == 0) { + pLastFile->offset = pLastFile->size; + goto _exit; + } + + // size + size = sizeof(uint32_t); // TSDB_FILE_DLMT + for (int32_t iBlockL = 0; iBlockL < taosArrayGetSize(aBlockL); iBlockL++) { + size += tPutBlockL(NULL, taosArrayGet(aBlockL, iBlockL)); + } + size += sizeof(TSCKSUM); + + // alloc + code = tRealloc(&pWriter->aBuf[0], size); + if (code) goto _err; + + // encode + n = 0; + n += tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT); + for (int32_t iBlockL = 0; iBlockL < taosArrayGetSize(aBlockL); iBlockL++) { + n += tPutBlockL(pWriter->aBuf[0] + n, taosArrayGet(aBlockL, iBlockL)); + } + taosCalcChecksumAppend(0, pWriter->aBuf[0], size); + + ASSERT(n + sizeof(TSCKSUM) == size); + + // write + n = taosWriteFile(pWriter->pLastFD, pWriter->aBuf[0], size); + if (n < 0) { + code = TAOS_SYSTEM_ERROR(errno); + goto _err; + } + + // update + pLastFile->offset = pLastFile->size; + pLastFile->size += size; + +_exit: + tsdbTrace("vgId:%d tsdb write blockl, loffset:%" PRId64 " size:%" PRId64, TD_VID(pWriter->pTsdb->pVnode), + pLastFile->offset, size); + return code; + +_err: + tsdbError("vgId:%d tsdb write blockl failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); + return code; +} + static void tsdbUpdateBlockInfo(SBlockData *pBlockData, SBlock *pBlock) { for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) { TSDBKEY key = {.ts = pBlockData->aTSKEY[iRow], .version = pBlockData->aVersion[iRow]}; @@ -1611,357 +1372,127 @@ static void tsdbUpdateBlockInfo(SBlockData *pBlockData, SBlock *pBlock) { pBlock->maxKey = key; } - pBlock->minVersion = TMIN(pBlock->minVersion, key.version); - pBlock->maxVersion = TMAX(pBlock->maxVersion, key.version); + pBlock->minVer = TMIN(pBlock->minVer, key.version); + pBlock->maxVer = TMAX(pBlock->maxVer, key.version); } pBlock->nRow += pBlockData->nRow; } -static int32_t tsdbWriteBlockDataKey(SSubBlock *pSubBlock, SBlockData *pBlockData, uint8_t **ppBuf1, int64_t *nDataP, - uint8_t **ppBuf2) { +static int32_t tsdbWriteBlockSma(SDataFWriter *pWriter, SBlockData *pBlockData, SSmaInfo *pSmaInfo) { int32_t code = 0; - int64_t size; - int64_t tsize; - - if (pSubBlock->cmprAlg == NO_COMPRESSION) { - pSubBlock->szVersion = sizeof(int64_t) * pSubBlock->nRow; - pSubBlock->szTSKEY = sizeof(TSKEY) * pSubBlock->nRow; - - code = tRealloc(ppBuf1, *nDataP + pSubBlock->szVersion + pSubBlock->szTSKEY + sizeof(TSCKSUM)); - if (code) goto _err; - - // VERSION - memcpy(*ppBuf1 + *nDataP, pBlockData->aVersion, pSubBlock->szVersion); - - // TSKEY - memcpy(*ppBuf1 + *nDataP + pSubBlock->szVersion, pBlockData->aTSKEY, pSubBlock->szTSKEY); - } else { - size = (sizeof(int64_t) + sizeof(TSKEY)) * pSubBlock->nRow + COMP_OVERFLOW_BYTES * 2; - - code = tRealloc(ppBuf1, *nDataP + size + sizeof(TSCKSUM)); - if (code) goto _err; - - tsize = sizeof(int64_t) * pSubBlock->nRow + COMP_OVERFLOW_BYTES; - if (pSubBlock->cmprAlg == TWO_STAGE_COMP) { - code = tRealloc(ppBuf2, tsize); - if (code) goto _err; - } - - // VERSION - pSubBlock->szVersion = - tsCompressBigint((char *)pBlockData->aVersion, sizeof(int64_t) * pBlockData->nRow, pBlockData->nRow, - *ppBuf1 + *nDataP, size, pSubBlock->cmprAlg, *ppBuf2, tsize); - if (pSubBlock->szVersion <= 0) { - code = TSDB_CODE_COMPRESS_ERROR; - goto _err; - } - // TSKEY - pSubBlock->szTSKEY = tsCompressTimestamp((char *)pBlockData->aTSKEY, sizeof(TSKEY) * pBlockData->nRow, - pBlockData->nRow, *ppBuf1 + *nDataP + pSubBlock->szVersion, - size - pSubBlock->szVersion, pSubBlock->cmprAlg, *ppBuf2, tsize); - if (pSubBlock->szTSKEY <= 0) { - code = TSDB_CODE_COMPRESS_ERROR; - goto _err; - } + pSmaInfo->offset = 0; + pSmaInfo->size = 0; - ASSERT(pSubBlock->szVersion + pSubBlock->szTSKEY <= size); - } - - // checksum - size = pSubBlock->szVersion + pSubBlock->szTSKEY + sizeof(TSCKSUM); - taosCalcChecksumAppend(0, *ppBuf1 + *nDataP, size); - - *nDataP += size; - return code; - -_err: - return code; -} + // encode + for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) { + SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData); -static int32_t tsdbWriteColData(SColData *pColData, SBlockCol *pBlockCol, SSubBlock *pSubBlock, uint8_t **ppBuf1, - int64_t *nDataP, uint8_t **ppBuf2) { - int32_t code = 0; - int64_t size; - int64_t n = 0; + if ((!pColData->smaOn) || IS_VAR_DATA_TYPE(pColData->type)) continue; - // BITMAP - if (pColData->flag != HAS_VALUE) { - size = BIT2_SIZE(pColData->nVal) + COMP_OVERFLOW_BYTES; + SColumnDataAgg sma; + tsdbCalcColDataSMA(pColData, &sma); - code = tRealloc(ppBuf1, *nDataP + n + size); + code = tRealloc(&pWriter->aBuf[0], pSmaInfo->size + tPutColumnDataAgg(NULL, &sma)); if (code) goto _err; - - code = tRealloc(ppBuf2, size); - if (code) goto _err; - - pBlockCol->szBitmap = - tsCompressTinyint((char *)pColData->pBitMap, BIT2_SIZE(pColData->nVal), BIT2_SIZE(pColData->nVal), - *ppBuf1 + *nDataP + n, size, TWO_STAGE_COMP, *ppBuf2, size); - if (pBlockCol->szBitmap <= 0) { - code = TSDB_CODE_COMPRESS_ERROR; - goto _err; - } - } else { - pBlockCol->szBitmap = 0; + pSmaInfo->size += tPutColumnDataAgg(pWriter->aBuf[0] + pSmaInfo->size, &sma); } - n += pBlockCol->szBitmap; - // OFFSET - if (IS_VAR_DATA_TYPE(pColData->type)) { - size = sizeof(int32_t) * pColData->nVal + COMP_OVERFLOW_BYTES; + // write + if (pSmaInfo->size) { + int32_t size = pSmaInfo->size + sizeof(TSCKSUM); - code = tRealloc(ppBuf1, *nDataP + n + size); + code = tRealloc(&pWriter->aBuf[0], size); if (code) goto _err; - code = tRealloc(ppBuf2, size); - if (code) goto _err; + taosCalcChecksumAppend(0, pWriter->aBuf[0], size); - pBlockCol->szOffset = tsCompressInt((char *)pColData->aOffset, sizeof(int32_t) * pColData->nVal, pColData->nVal, - *ppBuf1 + *nDataP + n, size, TWO_STAGE_COMP, *ppBuf2, size); - if (pBlockCol->szOffset <= 0) { - code = TSDB_CODE_COMPRESS_ERROR; + int64_t n = taosWriteFile(pWriter->pSmaFD, pWriter->aBuf[0], size); + if (n < 0) { + code = TAOS_SYSTEM_ERROR(errno); goto _err; } - } else { - pBlockCol->szOffset = 0; - } - n += pBlockCol->szOffset; - // VALUE - if (pSubBlock->cmprAlg == NO_COMPRESSION) { - pBlockCol->szValue = pColData->nData; - - code = tRealloc(ppBuf1, *nDataP + n + pBlockCol->szValue + sizeof(TSCKSUM)); - if (code) goto _err; - - memcpy(*ppBuf1 + *nDataP + n, pColData->pData, pBlockCol->szValue); - } else { - size = pColData->nData + COMP_OVERFLOW_BYTES; - - code = tRealloc(ppBuf1, *nDataP + n + size + sizeof(TSCKSUM)); - if (code) goto _err; - - if (pSubBlock->cmprAlg == TWO_STAGE_COMP) { - code = tRealloc(ppBuf2, size); - if (code) goto _err; - } - - pBlockCol->szValue = - tDataTypes[pColData->type].compFunc((char *)pColData->pData, pColData->nData, pColData->nVal, - *ppBuf1 + *nDataP + n, size, pSubBlock->cmprAlg, *ppBuf2, size); - if (pBlockCol->szValue <= 0) { - code = TSDB_CODE_COMPRESS_ERROR; - goto _err; - } + pSmaInfo->offset = pWriter->fSma.size; + pWriter->fSma.size += size; } - n += pBlockCol->szValue; - pBlockCol->szOrigin = pColData->nData; - - // checksum - n += sizeof(TSCKSUM); - taosCalcChecksumAppend(0, *ppBuf1 + *nDataP, n); - - *nDataP += n; return code; _err: + tsdbError("vgId:%d tsdb write block sma failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); return code; } -static int32_t tsdbWriteBlockDataImpl(TdFilePtr pFD, SSubBlock *pSubBlock, SBlockDataHdr hdr, SArray *aBlockCol, - uint8_t *pData, int64_t nData, uint8_t **ppBuf) { +int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo, + int8_t cmprAlg, int8_t toLast) { int32_t code = 0; - int32_t nBlockCol = taosArrayGetSize(aBlockCol); - int64_t size; - int64_t n; - // HDR + SArray - pSubBlock->szBlockCol = sizeof(hdr); - for (int32_t iBlockCol = 0; iBlockCol < nBlockCol; iBlockCol++) { - pSubBlock->szBlockCol += tPutBlockCol(NULL, taosArrayGet(aBlockCol, iBlockCol)); - } + ASSERT(pBlockData->nRow > 0); - code = tRealloc(ppBuf, pSubBlock->szBlockCol + sizeof(TSCKSUM)); - if (code) goto _err; + pBlkInfo->offset = toLast ? pWriter->fLast.size : pWriter->fData.size; + pBlkInfo->szBlock = 0; + pBlkInfo->szKey = 0; - n = 0; - memcpy(*ppBuf, &hdr, sizeof(hdr)); - n += sizeof(hdr); - for (int32_t iBlockCol = 0; iBlockCol < nBlockCol; iBlockCol++) { - n += tPutBlockCol(*ppBuf + n, taosArrayGet(aBlockCol, iBlockCol)); - } - taosCalcChecksumAppend(0, *ppBuf, pSubBlock->szBlockCol + sizeof(TSCKSUM)); + int32_t aBufN[4] = {0}; + code = tCmprBlockData(pBlockData, cmprAlg, NULL, NULL, pWriter->aBuf, aBufN); + if (code) goto _err; - ASSERT(n == pSubBlock->szBlockCol); + // write ================= + TdFilePtr pFD = toLast ? pWriter->pLastFD : pWriter->pDataFD; - n = taosWriteFile(pFD, *ppBuf, pSubBlock->szBlockCol + sizeof(TSCKSUM)); - if (n < 0) { - code = TAOS_SYSTEM_ERROR(errno); - goto _err; - } + pBlkInfo->szKey = aBufN[3] + aBufN[2]; + pBlkInfo->szBlock = aBufN[0] + aBufN[1] + aBufN[2] + aBufN[3]; - // SBlockData - n = taosWriteFile(pFD, pData, nData); + int64_t n = taosWriteFile(pFD, pWriter->aBuf[3], aBufN[3]); if (n < 0) { code = TAOS_SYSTEM_ERROR(errno); goto _err; } - return code; - -_err: - return code; -} - -static int32_t tsdbWriteBlockSma(TdFilePtr pFD, SBlockData *pBlockData, SSubBlock *pSubBlock, uint8_t **ppBuf) { - int32_t code = 0; - int64_t n; - SColData *pColData; - - // prepare - pSubBlock->nSma = 0; - for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) { - pColData = tBlockDataGetColDataByIdx(pBlockData, iColData); - - if (IS_VAR_DATA_TYPE(pColData->type) || (!pColData->smaOn)) continue; - - pSubBlock->nSma++; - } - if (pSubBlock->nSma == 0) goto _exit; - - // calc - code = tRealloc(ppBuf, sizeof(SColumnDataAgg) * pSubBlock->nSma + sizeof(TSCKSUM)); - if (code) goto _err; - n = 0; - for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) { - pColData = tBlockDataGetColDataByIdx(pBlockData, iColData); - - if (IS_VAR_DATA_TYPE(pColData->type) || (!pColData->smaOn)) continue; - - tsdbCalcColDataSMA(pColData, &((SColumnDataAgg *)(*ppBuf))[n]); - n++; - } - taosCalcChecksumAppend(0, *ppBuf, sizeof(SColumnDataAgg) * pSubBlock->nSma + sizeof(TSCKSUM)); - - // write - n = taosWriteFile(pFD, *ppBuf, sizeof(SColumnDataAgg) * pSubBlock->nSma + sizeof(TSCKSUM)); + n = taosWriteFile(pFD, pWriter->aBuf[2], aBufN[2]); if (n < 0) { code = TAOS_SYSTEM_ERROR(errno); goto _err; } -_exit: - return code; - -_err: - return code; -} - -int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, uint8_t **ppBuf1, uint8_t **ppBuf2, - SBlockIdx *pBlockIdx, SBlock *pBlock, int8_t cmprAlg) { - int32_t code = 0; - SSubBlock *pSubBlock = &pBlock->aSubBlock[pBlock->nSubBlock++]; - SBlockCol blockCol; - SBlockCol *pBlockCol = &blockCol; - int64_t n; - TdFilePtr pFileFD = pBlock->last ? pWriter->pLastFD : pWriter->pDataFD; - SBlockDataHdr hdr = {.delimiter = TSDB_FILE_DLMT, .suid = pBlockIdx->suid, .uid = pBlockIdx->uid}; - uint8_t *p; - int64_t nData; - uint8_t *pBuf1 = NULL; - uint8_t *pBuf2 = NULL; - SArray *aBlockCol = NULL; - - if (!ppBuf1) ppBuf1 = &pBuf1; - if (!ppBuf2) ppBuf2 = &pBuf2; - - tsdbUpdateBlockInfo(pBlockData, pBlock); - - pSubBlock->nRow = pBlockData->nRow; - pSubBlock->cmprAlg = cmprAlg; - if (pBlock->last) { - pSubBlock->offset = pWriter->fLast.size; - } else { - pSubBlock->offset = pWriter->fData.size; - } - - // ======================= BLOCK DATA ======================= - // TSDBKEY - nData = 0; - code = tsdbWriteBlockDataKey(pSubBlock, pBlockData, ppBuf1, &nData, ppBuf2); - if (code) goto _err; - - // COLUMNS - aBlockCol = taosArrayInit(taosArrayGetSize(pBlockData->aIdx), sizeof(SBlockCol)); - if (aBlockCol == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } - int32_t offset = 0; - for (int32_t iCol = 0; iCol < taosArrayGetSize(pBlockData->aIdx); iCol++) { - SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iCol); - - ASSERT(pColData->flag); - - if (pColData->flag == HAS_NONE) continue; - - pBlockCol->cid = pColData->cid; - pBlockCol->type = pColData->type; - pBlockCol->smaOn = pColData->smaOn; - pBlockCol->flag = pColData->flag; - - if (pColData->flag != HAS_NULL) { - code = tsdbWriteColData(pColData, pBlockCol, pSubBlock, ppBuf1, &nData, ppBuf2); - if (code) goto _err; - - pBlockCol->offset = offset; - offset = offset + pBlockCol->szBitmap + pBlockCol->szOffset + pBlockCol->szValue + sizeof(TSCKSUM); + if (aBufN[1]) { + n = taosWriteFile(pFD, pWriter->aBuf[1], aBufN[1]); + if (n < 0) { + code = TAOS_SYSTEM_ERROR(errno); + goto _err; } + } - if (taosArrayPush(aBlockCol, pBlockCol) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; + if (aBufN[0]) { + n = taosWriteFile(pFD, pWriter->aBuf[0], aBufN[0]); + if (n < 0) { + code = TAOS_SYSTEM_ERROR(errno); goto _err; } } - // write - code = tsdbWriteBlockDataImpl(pFileFD, pSubBlock, hdr, aBlockCol, *ppBuf1, nData, ppBuf2); - if (code) goto _err; - - pSubBlock->szBlock = pSubBlock->szBlockCol + sizeof(TSCKSUM) + nData; - if (pBlock->last) { - pWriter->fLast.size += pSubBlock->szBlock; + // update info + if (toLast) { + pWriter->fLast.size += pBlkInfo->szBlock; } else { - pWriter->fData.size += pSubBlock->szBlock; + pWriter->fData.size += pBlkInfo->szBlock; } - // ======================= BLOCK SMA ======================= - pSubBlock->sOffset = 0; - pSubBlock->nSma = 0; - - if (pBlock->nSubBlock > 1 || pBlock->last || pBlock->hasDup) goto _exit; - - code = tsdbWriteBlockSma(pWriter->pSmaFD, pBlockData, pSubBlock, ppBuf1); - if (code) goto _err; - - if (pSubBlock->nSma > 0) { - pSubBlock->sOffset = pWriter->fSma.size; - pWriter->fSma.size += (sizeof(SColumnDataAgg) * pSubBlock->nSma + sizeof(TSCKSUM)); + // ================= SMA ==================== + if (pSmaInfo) { + code = tsdbWriteBlockSma(pWriter, pBlockData, pSmaInfo); + if (code) goto _err; } _exit: - tFree(pBuf1); - tFree(pBuf2); - taosArrayDestroy(aBlockCol); + tsdbTrace("vgId:%d tsdb write block data, suid:%" PRId64 " uid:%" PRId64 " nRow:%d, offset:%" PRId64 " size:%d", + TD_VID(pWriter->pTsdb->pVnode), pBlockData->suid, pBlockData->uid, pBlockData->nRow, pBlkInfo->offset, + pBlkInfo->szBlock); return code; _err: - tsdbError("vgId:%d, write block data failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); - tFree(pBuf1); - tFree(pBuf2); - taosArrayDestroy(aBlockCol); + tsdbError("vgId:%d tsdb write block data failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); return code; } @@ -2075,4 +1606,4 @@ int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo) { _err: tsdbError("vgId:%d, tsdb DFileSet copy failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); return code; -} \ No newline at end of file +} diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c index c40fb98d62b8dd6b1a76c1f6ec2fd870cbff3624..ab2b2b617a3d36dbc2c86c2a2207cffac8f087f6 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c +++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c @@ -27,12 +27,16 @@ struct STsdbSnapReader { int32_t fid; SDataFReader* pDataFReader; SArray* aBlockIdx; // SArray - int32_t iBlockIdx; + SArray* aBlockL; // SArray SBlockIdx* pBlockIdx; - SMapData mBlock; // SMapData - int32_t iBlock; - SBlockData oBlockData; - SBlockData nBlockData; + SBlockL* pBlockL; + + int32_t iBlockIdx; + int32_t iBlockL; + SMapData mBlock; // SMapData + int32_t iBlock; + SBlockData oBlockData; + SBlockData nBlockData; // for del file int8_t delDone; SDelFReader* pDelFReader; @@ -47,114 +51,116 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) { while (true) { if (pReader->pDataFReader == NULL) { - SDFileSet* pSet = - taosArraySearch(pReader->fs.aDFileSet, &(SDFileSet){.fid = pReader->fid}, tDFileSetCmprFn, TD_GT); - + // next + SDFileSet dFileSet = {.fid = pReader->fid}; + SDFileSet* pSet = taosArraySearch(pReader->fs.aDFileSet, &dFileSet, tDFileSetCmprFn, TD_GT); if (pSet == NULL) goto _exit; - pReader->fid = pSet->fid; - code = tsdbDataFReaderOpen(&pReader->pDataFReader, pReader->pTsdb, pSet); - if (code) goto _err; - // SBlockIdx - code = tsdbReadBlockIdx(pReader->pDataFReader, pReader->aBlockIdx, NULL); + // load + code = tsdbDataFReaderOpen(&pReader->pDataFReader, pTsdb, pSet); if (code) goto _err; - pReader->iBlockIdx = 0; - pReader->pBlockIdx = NULL; - - tsdbInfo("vgId:%d, vnode snapshot tsdb open data file to read for %s, fid:%d", TD_VID(pTsdb->pVnode), pTsdb->path, - pReader->fid); - } + code = tsdbReadBlockIdx(pReader->pDataFReader, pReader->aBlockIdx); + if (code) goto _err; - while (true) { - if (pReader->pBlockIdx == NULL) { - if (pReader->iBlockIdx >= taosArrayGetSize(pReader->aBlockIdx)) { - tsdbDataFReaderClose(&pReader->pDataFReader); - break; - } + code = tsdbReadBlockL(pReader->pDataFReader, pReader->aBlockL); + if (code) goto _err; + // init + pReader->iBlockIdx = 0; + if (pReader->iBlockIdx < taosArrayGetSize(pReader->aBlockIdx)) { pReader->pBlockIdx = (SBlockIdx*)taosArrayGet(pReader->aBlockIdx, pReader->iBlockIdx); - pReader->iBlockIdx++; - code = tsdbReadBlock(pReader->pDataFReader, pReader->pBlockIdx, &pReader->mBlock, NULL); + code = tsdbReadBlock(pReader->pDataFReader, pReader->pBlockIdx, &pReader->mBlock); if (code) goto _err; pReader->iBlock = 0; + } else { + pReader->pBlockIdx = NULL; } - SBlock block; - SBlock* pBlock = █ + pReader->iBlockL = 0; while (true) { - if (pReader->iBlock >= pReader->mBlock.nItem) { - pReader->pBlockIdx = NULL; + if (pReader->iBlockL >= taosArrayGetSize(pReader->aBlockL)) { + pReader->pBlockL = NULL; break; } - tMapDataGetItemByIdx(&pReader->mBlock, pReader->iBlock, pBlock, tGetBlock); - pReader->iBlock++; - - if (pBlock->minVersion > pReader->ever || pBlock->maxVersion < pReader->sver) continue; - - code = tsdbReadBlockData(pReader->pDataFReader, pReader->pBlockIdx, pBlock, &pReader->oBlockData, NULL, NULL); - if (code) goto _err; - - // filter - tBlockDataReset(&pReader->nBlockData); - for (int32_t iColData = 0; iColData < taosArrayGetSize(pReader->oBlockData.aIdx); iColData++) { - SColData* pColDataO = tBlockDataGetColDataByIdx(&pReader->oBlockData, iColData); - SColData* pColDataN = NULL; - - code = tBlockDataAddColData(&pReader->nBlockData, taosArrayGetSize(pReader->nBlockData.aIdx), &pColDataN); - if (code) goto _err; - - tColDataInit(pColDataN, pColDataO->cid, pColDataO->type, pColDataO->smaOn); + pReader->pBlockL = (SBlockL*)taosArrayGet(pReader->aBlockL, pReader->iBlockL); + if (pReader->pBlockL->minVer <= pReader->ever && pReader->pBlockL->maxVer >= pReader->sver) { + // TODO + break; } - for (int32_t iRow = 0; iRow < pReader->oBlockData.nRow; iRow++) { - TSDBROW row = tsdbRowFromBlockData(&pReader->oBlockData, iRow); - int64_t version = TSDBROW_VERSION(&row); + pReader->iBlockL++; + } - tsdbTrace("vgId:%d, vnode snapshot tsdb read for %s, %" PRId64 "(%" PRId64 " , %" PRId64 ")", - TD_VID(pReader->pTsdb->pVnode), pReader->pTsdb->path, version, pReader->sver, pReader->ever); + tsdbInfo("vgId:%d, vnode snapshot tsdb open data file to read for %s, fid:%d", TD_VID(pTsdb->pVnode), pTsdb->path, + pReader->fid); + } - if (version < pReader->sver || version > pReader->ever) continue; + while (true) { + if (pReader->pBlockIdx && pReader->pBlockL) { + TABLEID id = {.suid = pReader->pBlockL->suid, .uid = pReader->pBlockL->minUid}; + + ASSERT(0); + + // if (tTABLEIDCmprFn(pReader->pBlockIdx, &minId) < 0) { + // // TODO + // } else if (tTABLEIDCmprFn(pReader->pBlockIdx, &maxId) < 0) { + // // TODO + // } else { + // // TODO + // } + } else if (pReader->pBlockIdx) { + while (pReader->iBlock < pReader->mBlock.nItem) { + SBlock block; + tMapDataGetItemByIdx(&pReader->mBlock, pReader->iBlock, &block, tGetBlock); + + if (block.minVer <= pReader->ever && block.maxVer >= pReader->sver) { + // load data (todo) + } - code = tBlockDataAppendRow(&pReader->nBlockData, &row, NULL); - if (code) goto _err; + // next + pReader->iBlock++; + if (*ppData) break; } - if (pReader->nBlockData.nRow <= 0) { - continue; - } + if (pReader->iBlock >= pReader->mBlock.nItem) { + pReader->iBlockIdx++; + if (pReader->iBlockIdx < taosArrayGetSize(pReader->aBlockIdx)) { + pReader->pBlockIdx = (SBlockIdx*)taosArrayGet(pReader->aBlockIdx, pReader->iBlockIdx); - // org data - // compress data (todo) - int32_t size = sizeof(TABLEID) + tPutBlockData(NULL, &pReader->nBlockData); + code = tsdbReadBlock(pReader->pDataFReader, pReader->pBlockIdx, &pReader->mBlock); + if (code) goto _err; - *ppData = taosMemoryMalloc(sizeof(SSnapDataHdr) + size); - if (*ppData == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; + pReader->iBlock = 0; + } else { + pReader->pBlockIdx = NULL; + } } - SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData); - pHdr->type = pReader->type; - pHdr->size = size; - - TABLEID* pId = (TABLEID*)(&pHdr[1]); - pId->suid = pReader->pBlockIdx->suid; - pId->uid = pReader->pBlockIdx->uid; - - tPutBlockData((uint8_t*)(&pId[1]), &pReader->nBlockData); + if (*ppData) goto _exit; + } else if (pReader->pBlockL) { + while (pReader->pBlockL) { + if (pReader->pBlockL->minVer <= pReader->ever && pReader->pBlockL->maxVer >= pReader->sver) { + // load data (todo) + } - tsdbInfo("vgId:%d, vnode snapshot read data for %s, fid:%d suid:%" PRId64 " uid:%" PRId64 - " iBlock:%d minVersion:%d maxVersion:%d nRow:%d out of %d size:%d", - TD_VID(pTsdb->pVnode), pTsdb->path, pReader->fid, pReader->pBlockIdx->suid, pReader->pBlockIdx->uid, - pReader->iBlock - 1, pBlock->minVersion, pBlock->maxVersion, pReader->nBlockData.nRow, pBlock->nRow, - size); + // next + pReader->iBlockL++; + if (pReader->iBlockL < taosArrayGetSize(pReader->aBlockL)) { + pReader->pBlockL = (SBlockL*)taosArrayGetSize(pReader->aBlockL); + } else { + pReader->pBlockL = NULL; + } - goto _exit; + if (*ppData) goto _exit; + } + } else { + tsdbDataFReaderClose(&pReader->pDataFReader); + break; } } } @@ -179,11 +185,11 @@ static int32_t tsdbSnapReadDel(STsdbSnapReader* pReader, uint8_t** ppData) { } // open - code = tsdbDelFReaderOpen(&pReader->pDelFReader, pDelFile, pTsdb, NULL); + code = tsdbDelFReaderOpen(&pReader->pDelFReader, pDelFile, pTsdb); if (code) goto _err; // read index - code = tsdbReadDelIdx(pReader->pDelFReader, pReader->aDelIdx, NULL); + code = tsdbReadDelIdx(pReader->pDelFReader, pReader->aDelIdx); if (code) goto _err; pReader->iDelIdx = 0; @@ -199,7 +205,7 @@ static int32_t tsdbSnapReadDel(STsdbSnapReader* pReader, uint8_t** ppData) { pReader->iDelIdx++; - code = tsdbReadDelData(pReader->pDelFReader, pDelIdx, pReader->aDelData, NULL); + code = tsdbReadDelData(pReader->pDelFReader, pDelIdx, pReader->aDelData); if (code) goto _err; int32_t size = 0; @@ -292,10 +298,15 @@ int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type code = TSDB_CODE_OUT_OF_MEMORY; goto _err; } + pReader->aBlockL = taosArrayInit(0, sizeof(SBlockL)); + if (pReader->aBlockL == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } pReader->mBlock = tMapDataInit(); - code = tBlockDataInit(&pReader->oBlockData); + code = tBlockDataCreate(&pReader->oBlockData); if (code) goto _err; - code = tBlockDataInit(&pReader->nBlockData); + code = tBlockDataCreate(&pReader->nBlockData); if (code) goto _err; pReader->aDelIdx = taosArrayInit(0, sizeof(SDelIdx)); @@ -327,10 +338,11 @@ int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader) { if (pReader->pDataFReader) { tsdbDataFReaderClose(&pReader->pDataFReader); } + taosArrayDestroy(pReader->aBlockL); taosArrayDestroy(pReader->aBlockIdx); tMapDataClear(&pReader->mBlock); - tBlockDataClear(&pReader->oBlockData, 1); - tBlockDataClear(&pReader->nBlockData, 1); + tBlockDataDestroy(&pReader->oBlockData, 1); + tBlockDataDestroy(&pReader->nBlockData, 1); if (pReader->pDelFReader) { tsdbDelFReaderClose(&pReader->pDelFReader); @@ -405,6 +417,7 @@ struct STsdbSnapWriter { int8_t cmprAlg; int64_t commitID; + uint8_t* aBuf[5]; // for data file SBlockData bData; @@ -418,6 +431,9 @@ struct STsdbSnapWriter { SBlockData* pBlockData; int32_t iRow; SBlockData bDataR; + SArray* aBlockL; // SArray + int32_t iBlockL; + SBlockData lDataR; SDataFWriter* pDataFWriter; SBlockIdx* pBlockIdxW; // NULL when no committing table @@ -427,6 +443,7 @@ struct STsdbSnapWriter { SMapData mBlockW; // SMapData SArray* aBlockIdxW; // SArray + SArray* aBlockLW; // SArray // for del file SDelFReader* pDelFReader; @@ -437,25 +454,6 @@ struct STsdbSnapWriter { SArray* aDelIdxW; }; -static int32_t tsdbSnapWriteAppendData(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData) { - int32_t code = 0; - int32_t iRow = 0; // todo - int32_t nRow = 0; // todo - SBlockData* pBlockData = NULL; // todo - - while (iRow < nRow) { - code = tBlockDataAppendRow(&pWriter->bDataW, &tsdbRowFromBlockData(pBlockData, iRow), NULL); - if (code) goto _err; - } - - return code; - -_err: - tsdbError("vgId:%d, tsdb snapshot write append data for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), - pWriter->pTsdb->path, tstrerror(code)); - return code; -} - static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) { int32_t code = 0; @@ -467,20 +465,21 @@ static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) { if (pWriter->pBlockData) { ASSERT(pWriter->iRow < pWriter->pBlockData->nRow); while (pWriter->iRow < pWriter->pBlockData->nRow) { - code = tBlockDataAppendRow(&pWriter->bDataW, &tsdbRowFromBlockData(pWriter->pBlockData, pWriter->iRow), NULL); + code = tBlockDataAppendRow(&pWriter->bDataW, &tsdbRowFromBlockData(pWriter->pBlockData, pWriter->iRow), NULL, + 0); // todo if (code) goto _err; if (pWriter->bDataW.nRow >= pWriter->maxRow * 4 / 5) { - pWriter->blockW.last = 0; - code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW, - &pWriter->blockW, pWriter->cmprAlg); + // pWriter->blockW.last = 0; + // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW, + // &pWriter->blockW, pWriter->cmprAlg); if (code) goto _err; code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock); if (code) goto _err; tBlockReset(&pWriter->blockW); - tBlockDataClearData(&pWriter->bDataW); + tBlockDataClear(&pWriter->bDataW); } pWriter->iRow++; @@ -489,16 +488,16 @@ static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) { // write remain data if has if (pWriter->bDataW.nRow > 0) { - pWriter->blockW.last = 0; + // pWriter->blockW.last = 0; if (pWriter->bDataW.nRow < pWriter->minRow) { if (pWriter->iBlock > pWriter->mBlock.nItem) { - pWriter->blockW.last = 1; + // pWriter->blockW.last = 1; } } - code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW, - &pWriter->blockW, pWriter->cmprAlg); - if (code) goto _err; + // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW, + // &pWriter->blockW, pWriter->cmprAlg); + // if (code) goto _err; code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock); if (code) goto _err; @@ -510,16 +509,16 @@ static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) { SBlock block; tMapDataGetItemByIdx(&pWriter->mBlock, pWriter->iBlock, &block, tGetBlock); - if (block.last) { - code = tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, &pWriter->bDataR, NULL, NULL); - if (code) goto _err; + // if (block.last) { + // code = tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, &pWriter->bDataR, NULL, NULL); + // if (code) goto _err; - tBlockReset(&block); - block.last = 1; - code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataR, NULL, NULL, pWriter->pBlockIdxW, &block, - pWriter->cmprAlg); - if (code) goto _err; - } + // tBlockReset(&block); + // block.last = 1; + // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataR, NULL, NULL, pWriter->pBlockIdxW, &block, + // pWriter->cmprAlg); + // if (code) goto _err; + // } code = tMapDataPutItem(&pWriter->mBlockW, &block, tPutBlock); if (code) goto _err; @@ -528,8 +527,8 @@ static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) { } // SBlock - code = tsdbWriteBlock(pWriter->pDataFWriter, &pWriter->mBlockW, NULL, pWriter->pBlockIdxW); - if (code) goto _err; + // code = tsdbWriteBlock(pWriter->pDataFWriter, &pWriter->mBlockW, NULL, pWriter->pBlockIdxW); + // if (code) goto _err; // SBlockIdx if (taosArrayPush(pWriter->aBlockIdxW, pWriter->pBlockIdxW) == NULL) { @@ -550,7 +549,7 @@ _err: static int32_t tsdbSnapMoveWriteTableData(STsdbSnapWriter* pWriter, SBlockIdx* pBlockIdx) { int32_t code = 0; - code = tsdbReadBlock(pWriter->pDataFReader, pBlockIdx, &pWriter->mBlock, NULL); + code = tsdbReadBlock(pWriter->pDataFReader, pBlockIdx, &pWriter->mBlock); if (code) goto _err; // SBlockData @@ -559,16 +558,17 @@ static int32_t tsdbSnapMoveWriteTableData(STsdbSnapWriter* pWriter, SBlockIdx* p for (int32_t iBlock = 0; iBlock < pWriter->mBlock.nItem; iBlock++) { tMapDataGetItemByIdx(&pWriter->mBlock, iBlock, &block, tGetBlock); - if (block.last) { - code = tsdbReadBlockData(pWriter->pDataFReader, pBlockIdx, &block, &pWriter->bDataR, NULL, NULL); - if (code) goto _err; + // if (block.last) { + // code = tsdbReadBlockData(pWriter->pDataFReader, pBlockIdx, &block, &pWriter->bDataR, NULL, NULL); + // if (code) goto _err; - tBlockReset(&block); - block.last = 1; - code = - tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataR, NULL, NULL, pBlockIdx, &block, pWriter->cmprAlg); - if (code) goto _err; - } + // tBlockReset(&block); + // block.last = 1; + // code = + // tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataR, NULL, NULL, pBlockIdx, &block, + // pWriter->cmprAlg); + // if (code) goto _err; + // } code = tMapDataPutItem(&pWriter->mBlockW, &block, tPutBlock); if (code) goto _err; @@ -576,7 +576,7 @@ static int32_t tsdbSnapMoveWriteTableData(STsdbSnapWriter* pWriter, SBlockIdx* p // SBlock SBlockIdx blockIdx = {.suid = pBlockIdx->suid, .uid = pBlockIdx->uid}; - code = tsdbWriteBlock(pWriter->pDataFWriter, &pWriter->mBlockW, NULL, &blockIdx); + code = tsdbWriteBlock(pWriter->pDataFWriter, &pWriter->mBlockW, &blockIdx); if (code) goto _err; // SBlockIdx @@ -601,9 +601,9 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) { TSDBROW row; TSDBROW* pRow = &row; - // correct schema - code = tBlockDataCorrectSchema(&pWriter->bDataW, pBlockData); - if (code) goto _err; + // // correct schema + // code = tBlockDataCorrectSchema(&pWriter->bDataW, pBlockData); + // if (code) goto _err; // loop to merge *pRow = tsdbRowFromBlockData(pBlockData, iRow); @@ -618,8 +618,8 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) { ASSERT(c); if (c < 0) { - code = tBlockDataAppendRow(&pWriter->bDataW, pRow, NULL); - if (code) goto _err; + // code = tBlockDataAppendRow(&pWriter->bDataW, pRow, NULL); + // if (code) goto _err; iRow++; if (iRow < pWriter->pBlockData->nRow) { @@ -628,8 +628,8 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) { pRow = NULL; } } else if (c > 0) { - code = tBlockDataAppendRow(&pWriter->bDataW, &tsdbRowFromBlockData(pWriter->pBlockData, pWriter->iRow), NULL); - if (code) goto _err; + // code = tBlockDataAppendRow(&pWriter->bDataW, &tsdbRowFromBlockData(pWriter->pBlockData, pWriter->iRow), + // NULL); if (code) goto _err; pWriter->iRow++; if (pWriter->iRow >= pWriter->pBlockData->nRow) { @@ -647,16 +647,15 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) { tMapDataGetItemByIdx(&pWriter->mBlock, pWriter->iBlock, &block, tGetBlock); - if (block.last) { - pWriter->pBlockData = &pWriter->bDataR; + // if (block.last) { + // pWriter->pBlockData = &pWriter->bDataR; - code = tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, pWriter->pBlockData, NULL, NULL); - if (code) goto _err; - pWriter->iRow = 0; + // code = tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, pWriter->pBlockData, NULL, + // NULL); if (code) goto _err; pWriter->iRow = 0; - pWriter->iBlock++; - break; - } + // pWriter->iBlock++; + // break; + // } c = tsdbKeyCmprFn(&block.maxKey, &key); @@ -664,16 +663,16 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) { if (c < 0) { if (pWriter->bDataW.nRow) { - pWriter->blockW.last = 0; - code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW, - &pWriter->blockW, pWriter->cmprAlg); - if (code) goto _err; + // pWriter->blockW.last = 0; + // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW, + // &pWriter->blockW, pWriter->cmprAlg); + // if (code) goto _err; code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock); if (code) goto _err; tBlockReset(&pWriter->blockW); - tBlockDataClearData(&pWriter->bDataW); + tBlockDataClear(&pWriter->bDataW); } code = tMapDataPutItem(&pWriter->mBlockW, &block, tPutBlock); @@ -687,9 +686,10 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) { if (c > 0) { pWriter->pBlockData = &pWriter->bDataR; - code = - tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, pWriter->pBlockData, NULL, NULL); - if (code) goto _err; + // code = + // tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, pWriter->pBlockData, NULL, + // NULL); + // if (code) goto _err; pWriter->iRow = 0; pWriter->iBlock++; @@ -700,8 +700,8 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) { if (pWriter->pBlockData) continue; - code = tBlockDataAppendRow(&pWriter->bDataW, pRow, NULL); - if (code) goto _err; + // code = tBlockDataAppendRow(&pWriter->bDataW, pRow, NULL); + // if (code) goto _err; iRow++; if (iRow < pBlockData->nRow) { @@ -715,15 +715,15 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) { if (pWriter->bDataW.nRow < pWriter->maxRow * 4 / 5) continue; _write_block: - code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW, - &pWriter->blockW, pWriter->cmprAlg); - if (code) goto _err; + // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW, + // &pWriter->blockW, pWriter->cmprAlg); + // if (code) goto _err; code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock); if (code) goto _err; tBlockReset(&pWriter->blockW); - tBlockDataClearData(&pWriter->bDataW); + tBlockDataClear(&pWriter->bDataW); } return code; @@ -789,7 +789,7 @@ static int32_t tsdbSnapWriteTableData(STsdbSnapWriter* pWriter, TABLEID id) { } if (pWriter->pBlockIdx) { - code = tsdbReadBlock(pWriter->pDataFReader, pWriter->pBlockIdx, &pWriter->mBlock, NULL); + code = tsdbReadBlock(pWriter->pDataFReader, pWriter->pBlockIdx, &pWriter->mBlock); if (code) goto _err; } else { tMapDataReset(&pWriter->mBlock); @@ -831,9 +831,11 @@ static int32_t tsdbSnapWriteDataEnd(STsdbSnapWriter* pWriter) { if (pWriter->pDataFWriter == NULL) goto _exit; + // finish current table code = tsdbSnapWriteTableDataEnd(pWriter); if (code) goto _err; + // move remain table while (pWriter->iBlockIdx < taosArrayGetSize(pWriter->aBlockIdx)) { code = tsdbSnapMoveWriteTableData(pWriter, (SBlockIdx*)taosArrayGet(pWriter->aBlockIdx, pWriter->iBlockIdx)); if (code) goto _err; @@ -841,8 +843,16 @@ static int32_t tsdbSnapWriteDataEnd(STsdbSnapWriter* pWriter) { pWriter->iBlockIdx++; } - code = tsdbWriteBlockIdx(pWriter->pDataFWriter, pWriter->aBlockIdxW, NULL); - if (code) goto _err; + // write remain stuff + if (taosArrayGetSize(pWriter->aBlockLW) > 0) { + code = tsdbWriteBlockL(pWriter->pDataFWriter, pWriter->aBlockIdxW); + if (code) goto _err; + } + + if (taosArrayGetSize(pWriter->aBlockIdx) > 0) { + code = tsdbWriteBlockIdx(pWriter->pDataFWriter, pWriter->aBlockIdxW); + if (code) goto _err; + } code = tsdbFSUpsertFSet(&pWriter->fs, &pWriter->pDataFWriter->wSet); if (code) goto _err; @@ -866,19 +876,22 @@ _err: } static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData) { - int32_t code = 0; - STsdb* pTsdb = pWriter->pTsdb; - TABLEID id = *(TABLEID*)(pData + sizeof(SSnapDataHdr)); - int64_t n; + int32_t code = 0; + STsdb* pTsdb = pWriter->pTsdb; + SSnapDataHdr* pHdr = (SSnapDataHdr*)pData; + TABLEID id = *(TABLEID*)(pData + sizeof(SSnapDataHdr)); + int64_t n; // decode SBlockData* pBlockData = &pWriter->bData; - n = tGetBlockData(pData + sizeof(SSnapDataHdr) + sizeof(TABLEID), pBlockData); - ASSERT(n + sizeof(SSnapDataHdr) + sizeof(TABLEID) == nData); + code = tDecmprBlockData(pData + sizeof(SSnapDataHdr) + sizeof(TABLEID), pHdr->size - sizeof(TABLEID), pBlockData, + pWriter->aBuf); + if (code) goto _err; // open file - TSDBKEY keyFirst = tBlockDataFirstKey(pBlockData); - TSDBKEY keyLast = tBlockDataLastKey(pBlockData); + TSDBKEY keyFirst = {.version = pBlockData->aVersion[0], .ts = pBlockData->aTSKEY[0]}; + TSDBKEY keyLast = {.version = pBlockData->aVersion[pBlockData->nRow - 1], + .ts = pBlockData->aTSKEY[pBlockData->nRow - 1]}; int32_t fid = tsdbKeyFid(keyFirst.ts, pWriter->minutes, pWriter->precision); ASSERT(fid == tsdbKeyFid(keyLast.ts, pWriter->minutes, pWriter->precision)); @@ -895,11 +908,15 @@ static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint3 code = tsdbDataFReaderOpen(&pWriter->pDataFReader, pTsdb, pSet); if (code) goto _err; - code = tsdbReadBlockIdx(pWriter->pDataFReader, pWriter->aBlockIdx, NULL); + code = tsdbReadBlockIdx(pWriter->pDataFReader, pWriter->aBlockIdx); + if (code) goto _err; + + code = tsdbReadBlockL(pWriter->pDataFReader, pWriter->aBlockL); if (code) goto _err; } else { ASSERT(pWriter->pDataFReader == NULL); taosArrayClear(pWriter->aBlockIdx); + taosArrayClear(pWriter->aBlockL); } pWriter->iBlockIdx = 0; pWriter->pBlockIdx = NULL; @@ -907,7 +924,9 @@ static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint3 pWriter->iBlock = 0; pWriter->pBlockData = NULL; pWriter->iRow = 0; + pWriter->iBlockL = 0; tBlockDataReset(&pWriter->bDataR); + tBlockDataReset(&pWriter->lDataR); // write SHeadFile fHead; @@ -928,7 +947,7 @@ static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint3 wSet.fid = fid; fHead = (SHeadFile){.commitID = pWriter->commitID, .offset = 0, .size = 0}; fData = (SDataFile){.commitID = pWriter->commitID, .size = 0}; - fLast = (SLastFile){.commitID = pWriter->commitID, .size = 0}; + fLast = (SLastFile){.commitID = pWriter->commitID, .size = 0, .offset = 0}; fSma = (SSmaFile){.commitID = pWriter->commitID, .size = 0}; } @@ -936,6 +955,7 @@ static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint3 if (code) goto _err; taosArrayClear(pWriter->aBlockIdxW); + taosArrayClear(pWriter->aBlockLW); tMapDataReset(&pWriter->mBlockW); pWriter->pBlockIdxW = NULL; tBlockDataReset(&pWriter->bDataW); @@ -963,10 +983,10 @@ static int32_t tsdbSnapWriteDel(STsdbSnapWriter* pWriter, uint8_t* pData, uint32 // reader if (pDelFile) { - code = tsdbDelFReaderOpen(&pWriter->pDelFReader, pDelFile, pTsdb, NULL); + code = tsdbDelFReaderOpen(&pWriter->pDelFReader, pDelFile, pTsdb); if (code) goto _err; - code = tsdbReadDelIdx(pWriter->pDelFReader, pWriter->aDelIdxR, NULL); + code = tsdbReadDelIdx(pWriter->pDelFReader, pWriter->aDelIdxR); if (code) goto _err; } @@ -980,60 +1000,57 @@ static int32_t tsdbSnapWriteDel(STsdbSnapWriter* pWriter, uint8_t* pData, uint32 TABLEID id = *(TABLEID*)(pData + sizeof(SSnapDataHdr)); while (true) { - SDelIdx* pDelIdx = NULL; - int64_t n = sizeof(SSnapDataHdr) + sizeof(TABLEID); - SDelData delData; - SDelIdx delIdx; - int8_t toBreak = 0; + if (pWriter->iDelIdx >= taosArrayGetSize(pWriter->aDelIdxR)) break; + if (tTABLEIDCmprFn(taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx), &id) >= 0) break; - if (pWriter->iDelIdx < taosArrayGetSize(pWriter->aDelIdxR)) { - pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx); - } + SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx); - if (pDelIdx) { - int32_t c = tTABLEIDCmprFn(&id, pDelIdx); - if (c < 0) { - goto _new_del; - } else { - code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData, NULL); - if (code) goto _err; + code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData); + if (code) goto _err; - pWriter->iDelIdx++; - if (c == 0) { - toBreak = 1; - delIdx = (SDelIdx){.suid = id.suid, .uid = id.uid}; - goto _merge_del; - } else { - delIdx = (SDelIdx){.suid = pDelIdx->suid, .uid = pDelIdx->uid}; - goto _write_del; - } - } + SDelIdx delIdx = *pDelIdx; + code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, &delIdx); + if (code) goto _err; + + if (taosArrayPush(pWriter->aDelIdxW, &delIdx) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; } - _new_del: - toBreak = 1; - delIdx = (SDelIdx){.suid = id.suid, .uid = id.uid}; - taosArrayClear(pWriter->aDelData); + pWriter->iDelIdx++; + } - _merge_del: - while (n < nData) { - n += tGetDelData(pData + n, &delData); - if (taosArrayPush(pWriter->aDelData, &delData) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } - } + if (pWriter->iDelIdx < taosArrayGetSize(pWriter->aDelIdxR) && + tTABLEIDCmprFn(taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx), &id) == 0) { + SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx); - _write_del: - code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, NULL, &delIdx); + code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData); if (code) goto _err; - if (taosArrayPush(pWriter->aDelIdxW, &delIdx) == NULL) { + pWriter->iDelIdx++; + } else { + taosArrayClear(pWriter->aDelData); + } + + int64_t n = sizeof(SSnapDataHdr) + sizeof(TABLEID); + while (n < nData) { + SDelData delData; + + n += tGetDelData(pData + n, &delData); + + if (taosArrayPush(pWriter->aDelData, &delData) == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _err; } + } - if (toBreak) break; + SDelIdx delIdx = {.suid = id.suid, .uid = id.uid}; + code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, &delIdx); + if (code) goto _err; + + if (taosArrayPush(pWriter->aDelIdxW, &delIdx) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; } _exit: @@ -1054,11 +1071,11 @@ static int32_t tsdbSnapWriteDelEnd(STsdbSnapWriter* pWriter) { for (; pWriter->iDelIdx < taosArrayGetSize(pWriter->aDelIdxR); pWriter->iDelIdx++) { SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx); - code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData, NULL); + code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData); if (code) goto _err; - SDelIdx delIdx = (SDelIdx){.suid = pDelIdx->suid, .uid = pDelIdx->uid}; - code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, NULL, &delIdx); + SDelIdx delIdx = *pDelIdx; + code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, &delIdx); if (code) goto _err; if (taosArrayPush(pWriter->aDelIdxR, &delIdx) == NULL) { @@ -1117,7 +1134,7 @@ int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWr pWriter->commitID = pTsdb->pVnode->state.commitID; // for data file - code = tBlockDataInit(&pWriter->bData); + code = tBlockDataCreate(&pWriter->bData); if (code) goto _err; pWriter->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx)); @@ -1125,17 +1142,29 @@ int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWr code = TSDB_CODE_OUT_OF_MEMORY; goto _err; } - code = tBlockDataInit(&pWriter->bDataR); + code = tBlockDataCreate(&pWriter->bDataR); if (code) goto _err; + pWriter->aBlockL = taosArrayInit(0, sizeof(SBlockL)); + if (pWriter->aBlockL == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + pWriter->aBlockIdxW = taosArrayInit(0, sizeof(SBlockIdx)); if (pWriter->aBlockIdxW == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _err; } - code = tBlockDataInit(&pWriter->bDataW); + code = tBlockDataCreate(&pWriter->bDataW); if (code) goto _err; + pWriter->aBlockLW = taosArrayInit(0, sizeof(SBlockL)); + if (pWriter->aBlockLW == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + // for del file pWriter->aDelIdxR = taosArrayInit(0, sizeof(SDelIdx)); if (pWriter->aDelIdxR == NULL) { @@ -1186,6 +1215,10 @@ int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback) { if (code) goto _err; } + for (int32_t iBuf = 0; iBuf < sizeof(pWriter->aBuf) / sizeof(uint8_t*); iBuf++) { + tFree(pWriter->aBuf[iBuf]); + } + tsdbInfo("vgId:%d, vnode snapshot tsdb writer close for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path); taosMemoryFree(pWriter); *ppWriter = NULL; @@ -1224,6 +1257,7 @@ int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData) _exit: tsdbDebug("vgId:%d, tsdb snapshot write for %s succeed", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path); + return code; _err: diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c index 60f0b18a62121697ad0081e13941dadde655f4b3..6db9d5e6f40c5d35e52d90dd86b28f4cb7a94676 100644 --- a/source/dnode/vnode/src/tsdb/tsdbUtil.c +++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c @@ -152,25 +152,6 @@ int32_t tTABLEIDCmprFn(const void *p1, const void *p2) { return 0; } -// TSDBKEY ====================================================== -static FORCE_INLINE int32_t tPutTSDBKEY(uint8_t *p, TSDBKEY *pKey) { - int32_t n = 0; - - n += tPutI64v(p ? p + n : p, pKey->version); - n += tPutI64(p ? p + n : p, pKey->ts); - - return n; -} - -static FORCE_INLINE int32_t tGetTSDBKEY(uint8_t *p, TSDBKEY *pKey) { - int32_t n = 0; - - n += tGetI64v(p + n, &pKey->version); - n += tGetI64(p + n, &pKey->ts); - - return n; -} - // SBlockIdx ====================================================== int32_t tPutBlockIdx(uint8_t *p, void *ph) { int32_t n = 0; @@ -215,34 +196,51 @@ int32_t tCmprBlockIdx(void const *lhs, void const *rhs) { return 0; } +int32_t tCmprBlockL(void const *lhs, void const *rhs) { + SBlockIdx *lBlockIdx = (SBlockIdx *)lhs; + SBlockL *rBlockL = (SBlockL *)rhs; + + if (lBlockIdx->suid < rBlockL->suid) { + return -1; + } else if (lBlockIdx->suid > rBlockL->suid) { + return 1; + } + + if (lBlockIdx->uid < rBlockL->minUid) { + return -1; + } else if (lBlockIdx->uid > rBlockL->maxUid) { + return 1; + } + + return 0; +} + // SBlock ====================================================== void tBlockReset(SBlock *pBlock) { - *pBlock = - (SBlock){.minKey = TSDBKEY_MAX, .maxKey = TSDBKEY_MIN, .minVersion = VERSION_MAX, .maxVersion = VERSION_MIN}; + *pBlock = (SBlock){.minKey = TSDBKEY_MAX, .maxKey = TSDBKEY_MIN, .minVer = VERSION_MAX, .maxVer = VERSION_MIN}; } int32_t tPutBlock(uint8_t *p, void *ph) { int32_t n = 0; SBlock *pBlock = (SBlock *)ph; - n += tPutTSDBKEY(p ? p + n : p, &pBlock->minKey); - n += tPutTSDBKEY(p ? p + n : p, &pBlock->maxKey); - n += tPutI64v(p ? p + n : p, pBlock->minVersion); - n += tPutI64v(p ? p + n : p, pBlock->maxVersion); + n += tPutI64v(p ? p + n : p, pBlock->minKey.version); + n += tPutI64v(p ? p + n : p, pBlock->minKey.ts); + n += tPutI64v(p ? p + n : p, pBlock->maxKey.version); + n += tPutI64v(p ? p + n : p, pBlock->maxKey.ts); + n += tPutI64v(p ? p + n : p, pBlock->minVer); + n += tPutI64v(p ? p + n : p, pBlock->maxVer); n += tPutI32v(p ? p + n : p, pBlock->nRow); - n += tPutI8(p ? p + n : p, pBlock->last); n += tPutI8(p ? p + n : p, pBlock->hasDup); n += tPutI8(p ? p + n : p, pBlock->nSubBlock); for (int8_t iSubBlock = 0; iSubBlock < pBlock->nSubBlock; iSubBlock++) { - n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].nRow); - n += tPutI8(p ? p + n : p, pBlock->aSubBlock[iSubBlock].cmprAlg); n += tPutI64v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].offset); - n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].szBlockCol); - n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].szVersion); - n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].szTSKEY); n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].szBlock); - n += tPutI64v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].sOffset); - n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].nSma); + n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].szKey); + } + if (pBlock->nSubBlock == 1 && !pBlock->hasDup) { + n += tPutI64v(p ? p + n : p, pBlock->smaInfo.offset); + n += tPutI32v(p ? p + n : p, pBlock->smaInfo.size); } return n; @@ -252,24 +250,26 @@ int32_t tGetBlock(uint8_t *p, void *ph) { int32_t n = 0; SBlock *pBlock = (SBlock *)ph; - n += tGetTSDBKEY(p + n, &pBlock->minKey); - n += tGetTSDBKEY(p + n, &pBlock->maxKey); - n += tGetI64v(p + n, &pBlock->minVersion); - n += tGetI64v(p + n, &pBlock->maxVersion); + n += tGetI64v(p + n, &pBlock->minKey.version); + n += tGetI64v(p + n, &pBlock->minKey.ts); + n += tGetI64v(p + n, &pBlock->maxKey.version); + n += tGetI64v(p + n, &pBlock->maxKey.ts); + n += tGetI64v(p + n, &pBlock->minVer); + n += tGetI64v(p + n, &pBlock->maxVer); n += tGetI32v(p + n, &pBlock->nRow); - n += tGetI8(p + n, &pBlock->last); n += tGetI8(p + n, &pBlock->hasDup); n += tGetI8(p + n, &pBlock->nSubBlock); for (int8_t iSubBlock = 0; iSubBlock < pBlock->nSubBlock; iSubBlock++) { - n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].nRow); - n += tGetI8(p + n, &pBlock->aSubBlock[iSubBlock].cmprAlg); n += tGetI64v(p + n, &pBlock->aSubBlock[iSubBlock].offset); - n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].szBlockCol); - n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].szVersion); - n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].szTSKEY); n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].szBlock); - n += tGetI64v(p + n, &pBlock->aSubBlock[iSubBlock].sOffset); - n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].nSma); + n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].szKey); + } + if (pBlock->nSubBlock == 1 && !pBlock->hasDup) { + n += tGetI64v(p + n, &pBlock->smaInfo.offset); + n += tGetI32v(p + n, &pBlock->smaInfo.size); + } else { + pBlock->smaInfo.offset = 0; + pBlock->smaInfo.size = 0; } return n; @@ -290,10 +290,48 @@ int32_t tBlockCmprFn(const void *p1, const void *p2) { bool tBlockHasSma(SBlock *pBlock) { if (pBlock->nSubBlock > 1) return false; - if (pBlock->last) return false; if (pBlock->hasDup) return false; - return pBlock->aSubBlock[0].nSma > 0; + return pBlock->smaInfo.size > 0; +} + +// SBlockL ====================================================== +int32_t tPutBlockL(uint8_t *p, void *ph) { + int32_t n = 0; + SBlockL *pBlockL = (SBlockL *)ph; + + n += tPutI64(p ? p + n : p, pBlockL->suid); + n += tPutI64(p ? p + n : p, pBlockL->minUid); + n += tPutI64(p ? p + n : p, pBlockL->maxUid); + n += tPutI64v(p ? p + n : p, pBlockL->minKey); + n += tPutI64v(p ? p + n : p, pBlockL->maxKey); + n += tPutI64v(p ? p + n : p, pBlockL->minVer); + n += tPutI64v(p ? p + n : p, pBlockL->maxVer); + n += tPutI32v(p ? p + n : p, pBlockL->nRow); + n += tPutI64v(p ? p + n : p, pBlockL->bInfo.offset); + n += tPutI32v(p ? p + n : p, pBlockL->bInfo.szBlock); + n += tPutI32v(p ? p + n : p, pBlockL->bInfo.szKey); + + return n; +} + +int32_t tGetBlockL(uint8_t *p, void *ph) { + int32_t n = 0; + SBlockL *pBlockL = (SBlockL *)ph; + + n += tGetI64(p + n, &pBlockL->suid); + n += tGetI64(p + n, &pBlockL->minUid); + n += tGetI64(p + n, &pBlockL->maxUid); + n += tGetI64v(p + n, &pBlockL->minKey); + n += tGetI64v(p + n, &pBlockL->maxKey); + n += tGetI64v(p + n, &pBlockL->minVer); + n += tGetI64v(p + n, &pBlockL->maxVer); + n += tGetI32v(p + n, &pBlockL->nRow); + n += tGetI64v(p + n, &pBlockL->bInfo.offset); + n += tGetI32v(p + n, &pBlockL->bInfo.szBlock); + n += tGetI32v(p + n, &pBlockL->bInfo.szKey); + + return n; } // SBlockCol ====================================================== @@ -307,15 +345,25 @@ int32_t tPutBlockCol(uint8_t *p, void *ph) { n += tPutI8(p ? p + n : p, pBlockCol->type); n += tPutI8(p ? p + n : p, pBlockCol->smaOn); n += tPutI8(p ? p + n : p, pBlockCol->flag); + n += tPutI32v(p ? p + n : p, pBlockCol->szOrigin); if (pBlockCol->flag != HAS_NULL) { + if (pBlockCol->flag != HAS_VALUE) { + n += tPutI32v(p ? p + n : p, pBlockCol->szBitmap); + } + + if (IS_VAR_DATA_TYPE(pBlockCol->type)) { + n += tPutI32v(p ? p + n : p, pBlockCol->szOffset); + } + + if (pBlockCol->flag != (HAS_NULL | HAS_NONE)) { + n += tPutI32v(p ? p + n : p, pBlockCol->szValue); + } + n += tPutI32v(p ? p + n : p, pBlockCol->offset); - n += tPutI32v(p ? p + n : p, pBlockCol->szBitmap); - n += tPutI32v(p ? p + n : p, pBlockCol->szOffset); - n += tPutI32v(p ? p + n : p, pBlockCol->szValue); - n += tPutI32v(p ? p + n : p, pBlockCol->szOrigin); } +_exit: return n; } @@ -327,15 +375,29 @@ int32_t tGetBlockCol(uint8_t *p, void *ph) { n += tGetI8(p + n, &pBlockCol->type); n += tGetI8(p + n, &pBlockCol->smaOn); n += tGetI8(p + n, &pBlockCol->flag); + n += tGetI32v(p + n, &pBlockCol->szOrigin); ASSERT(pBlockCol->flag && (pBlockCol->flag != HAS_NONE)); + pBlockCol->szBitmap = 0; + pBlockCol->szOffset = 0; + pBlockCol->szValue = 0; + pBlockCol->offset = 0; + if (pBlockCol->flag != HAS_NULL) { + if (pBlockCol->flag != HAS_VALUE) { + n += tGetI32v(p + n, &pBlockCol->szBitmap); + } + + if (IS_VAR_DATA_TYPE(pBlockCol->type)) { + n += tGetI32v(p + n, &pBlockCol->szOffset); + } + + if (pBlockCol->flag != (HAS_NULL | HAS_NONE)) { + n += tGetI32v(p + n, &pBlockCol->szValue); + } + n += tGetI32v(p + n, &pBlockCol->offset); - n += tGetI32v(p + n, &pBlockCol->szBitmap); - n += tGetI32v(p + n, &pBlockCol->szOffset); - n += tGetI32v(p + n, &pBlockCol->szValue); - n += tGetI32v(p + n, &pBlockCol->szOrigin); } return n; @@ -866,6 +928,9 @@ int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal) { size = BIT2_SIZE(pColData->nVal + 1); code = tRealloc(&pColData->pBitMap, size); if (code) goto _exit; + if ((pColData->nVal & 3) == 0) { + pColData->pBitMap[pColData->nVal >> 2] = 0; + } // put value if (pColVal->isNone) { @@ -910,13 +975,14 @@ int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest) { int32_t size; ASSERT(pColDataSrc->nVal > 0); + ASSERT(pColDataDest->cid = pColDataSrc->cid); + ASSERT(pColDataDest->type = pColDataSrc->type); - pColDataDest->cid = pColDataSrc->cid; - pColDataDest->type = pColDataSrc->type; pColDataDest->smaOn = pColDataSrc->smaOn; pColDataDest->nVal = pColDataSrc->nVal; pColDataDest->flag = pColDataSrc->flag; + // bitmap if (pColDataSrc->flag != HAS_NONE && pColDataSrc->flag != HAS_NULL && pColDataSrc->flag != HAS_VALUE) { size = BIT2_SIZE(pColDataSrc->nVal); code = tRealloc(&pColDataDest->pBitMap, size); @@ -924,6 +990,7 @@ int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest) { memcpy(pColDataDest->pBitMap, pColDataSrc->pBitMap, size); } + // offset if (IS_VAR_DATA_TYPE(pColDataDest->type)) { size = sizeof(int32_t) * pColDataSrc->nVal; @@ -933,9 +1000,10 @@ int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest) { memcpy(pColDataDest->aOffset, pColDataSrc->aOffset, size); } + // value + pColDataDest->nData = pColDataSrc->nData; code = tRealloc(&pColDataDest->pData, pColDataSrc->nData); if (code) goto _exit; - pColDataDest->nData = pColDataSrc->nData; memcpy(pColDataDest->pData, pColDataSrc->pData, pColDataDest->nData); _exit: @@ -1068,10 +1136,13 @@ static FORCE_INLINE int32_t tColDataCmprFn(const void *p1, const void *p2) { } // SBlockData ====================================================== -int32_t tBlockDataInit(SBlockData *pBlockData) { +int32_t tBlockDataCreate(SBlockData *pBlockData) { int32_t code = 0; + pBlockData->suid = 0; + pBlockData->uid = 0; pBlockData->nRow = 0; + pBlockData->aUid = NULL; pBlockData->aVersion = NULL; pBlockData->aTSKEY = NULL; pBlockData->aIdx = taosArrayInit(0, sizeof(int32_t)); @@ -1090,42 +1161,77 @@ _exit: return code; } -void tBlockDataReset(SBlockData *pBlockData) { - pBlockData->nRow = 0; - taosArrayClear(pBlockData->aIdx); -} - -void tBlockDataClear(SBlockData *pBlockData, int8_t deepClear) { +void tBlockDataDestroy(SBlockData *pBlockData, int8_t deepClear) { + tFree((uint8_t *)pBlockData->aUid); tFree((uint8_t *)pBlockData->aVersion); tFree((uint8_t *)pBlockData->aTSKEY); taosArrayDestroy(pBlockData->aIdx); taosArrayDestroyEx(pBlockData->aColData, deepClear ? tColDataClear : NULL); - pBlockData->aColData = NULL; - pBlockData->aIdx = NULL; - pBlockData->aTSKEY = NULL; + pBlockData->aUid = NULL; pBlockData->aVersion = NULL; + pBlockData->aTSKEY = NULL; + pBlockData->aIdx = NULL; + pBlockData->aColData = NULL; } -int32_t tBlockDataSetSchema(SBlockData *pBlockData, STSchema *pTSchema) { - int32_t code = 0; - SColData *pColData; - STColumn *pTColumn; +int32_t tBlockDataInit(SBlockData *pBlockData, int64_t suid, int64_t uid, STSchema *pTSchema) { + int32_t code = 0; + + ASSERT(suid || uid); + + pBlockData->suid = suid; + pBlockData->uid = uid; + pBlockData->nRow = 0; - tBlockDataReset(pBlockData); + taosArrayClear(pBlockData->aIdx); for (int32_t iColumn = 1; iColumn < pTSchema->numOfCols; iColumn++) { - pTColumn = &pTSchema->columns[iColumn]; + STColumn *pTColumn = &pTSchema->columns[iColumn]; + SColData *pColData; code = tBlockDataAddColData(pBlockData, iColumn - 1, &pColData); if (code) goto _exit; - tColDataInit(pColData, pTColumn->colId, pTColumn->type, (pTColumn->flags & COL_SMA_ON) != 0); + tColDataInit(pColData, pTColumn->colId, pTColumn->type, (pTColumn->flags & COL_SMA_ON) ? 1 : 0); } _exit: return code; } -void tBlockDataClearData(SBlockData *pBlockData) { +int32_t tBlockDataInitEx(SBlockData *pBlockData, SBlockData *pBlockDataFrom) { + int32_t code = 0; + + ASSERT(pBlockDataFrom->suid || pBlockDataFrom->uid); + + pBlockData->suid = pBlockDataFrom->suid; + pBlockData->uid = pBlockDataFrom->uid; + pBlockData->nRow = 0; + + taosArrayClear(pBlockData->aIdx); + for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockDataFrom->aIdx); iColData++) { + SColData *pColDataFrom = tBlockDataGetColDataByIdx(pBlockDataFrom, iColData); + + SColData *pColData; + code = tBlockDataAddColData(pBlockData, iColData, &pColData); + if (code) goto _exit; + + tColDataInit(pColData, pColDataFrom->cid, pColDataFrom->type, pColDataFrom->smaOn); + } + +_exit: + return code; +} + +void tBlockDataReset(SBlockData *pBlockData) { + pBlockData->suid = 0; + pBlockData->uid = 0; + pBlockData->nRow = 0; + taosArrayClear(pBlockData->aIdx); +} + +void tBlockDataClear(SBlockData *pBlockData) { + ASSERT(pBlockData->suid || pBlockData->uid); + pBlockData->nRow = 0; for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) { SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData); @@ -1159,52 +1265,47 @@ _err: return code; } -int32_t tBlockDataAppendRow(SBlockData *pBlockData, TSDBROW *pRow, STSchema *pTSchema) { +int32_t tBlockDataAppendRow(SBlockData *pBlockData, TSDBROW *pRow, STSchema *pTSchema, int64_t uid) { int32_t code = 0; - // TSDBKEY + ASSERT(pBlockData->suid || pBlockData->uid); + + // uid + if (pBlockData->uid == 0) { + ASSERT(uid); + code = tRealloc((uint8_t **)&pBlockData->aUid, sizeof(int64_t) * (pBlockData->nRow + 1)); + if (code) goto _err; + pBlockData->aUid[pBlockData->nRow] = uid; + } + // version code = tRealloc((uint8_t **)&pBlockData->aVersion, sizeof(int64_t) * (pBlockData->nRow + 1)); if (code) goto _err; + pBlockData->aVersion[pBlockData->nRow] = TSDBROW_VERSION(pRow); + // timestamp code = tRealloc((uint8_t **)&pBlockData->aTSKEY, sizeof(TSKEY) * (pBlockData->nRow + 1)); if (code) goto _err; - pBlockData->aVersion[pBlockData->nRow] = TSDBROW_VERSION(pRow); pBlockData->aTSKEY[pBlockData->nRow] = TSDBROW_TS(pRow); // OTHER - int32_t iColData = 0; - int32_t nColData = taosArrayGetSize(pBlockData->aIdx); - SRowIter iter = {0}; - SRowIter *pIter = &iter; - SColData *pColData; - SColVal *pColVal; - - if (nColData == 0) goto _exit; - - tRowIterInit(pIter, pRow, pTSchema); - pColData = tBlockDataGetColDataByIdx(pBlockData, iColData); - pColVal = tRowIterNext(pIter); - - while (pColData) { - if (pColVal) { - if (pColData->cid == pColVal->cid) { - code = tColDataAppendValue(pColData, pColVal); - if (code) goto _err; - - pColVal = tRowIterNext(pIter); - pColData = ((++iColData) < nColData) ? tBlockDataGetColDataByIdx(pBlockData, iColData) : NULL; - } else if (pColData->cid < pColVal->cid) { - code = tColDataAppendValue(pColData, &COL_VAL_NONE(pColData->cid, pColData->type)); - if (code) goto _err; - - pColData = ((++iColData) < nColData) ? tBlockDataGetColDataByIdx(pBlockData, iColData) : NULL; - } else { - pColVal = tRowIterNext(pIter); - } - } else { + SRowIter rIter = {0}; + SColVal *pColVal; + + tRowIterInit(&rIter, pRow, pTSchema); + pColVal = tRowIterNext(&rIter); + for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) { + SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData); + + while (pColVal && pColVal->cid < pColData->cid) { + pColVal = tRowIterNext(&rIter); + } + + if (pColVal == NULL || pColVal->cid > pColData->cid) { code = tColDataAppendValue(pColData, &COL_VAL_NONE(pColData->cid, pColData->type)); if (code) goto _err; - - pColData = ((++iColData) < nColData) ? tBlockDataGetColDataByIdx(pBlockData, iColData) : NULL; + } else { + code = tColDataAppendValue(pColData, pColVal); + if (code) goto _err; + pColVal = tRowIterNext(&rIter); } } @@ -1259,128 +1360,111 @@ _exit: int32_t tBlockDataMerge(SBlockData *pBlockData1, SBlockData *pBlockData2, SBlockData *pBlockData) { int32_t code = 0; - // set target - int32_t iColData1 = 0; - int32_t nColData1 = taosArrayGetSize(pBlockData1->aIdx); - int32_t iColData2 = 0; - int32_t nColData2 = taosArrayGetSize(pBlockData2->aIdx); - SColData *pColData1; - SColData *pColData2; - SColData *pColData; - - tBlockDataReset(pBlockData); - while (iColData1 < nColData1 && iColData2 < nColData2) { - pColData1 = tBlockDataGetColDataByIdx(pBlockData1, iColData1); - pColData2 = tBlockDataGetColDataByIdx(pBlockData2, iColData2); - - if (pColData1->cid == pColData2->cid) { - code = tBlockDataAddColData(pBlockData, taosArrayGetSize(pBlockData->aIdx), &pColData); - if (code) goto _exit; - tColDataInit(pColData, pColData2->cid, pColData2->type, pColData2->smaOn); - - iColData1++; - iColData2++; - } else if (pColData1->cid < pColData2->cid) { - code = tBlockDataAddColData(pBlockData, taosArrayGetSize(pBlockData->aIdx), &pColData); - if (code) goto _exit; - tColDataInit(pColData, pColData1->cid, pColData1->type, pColData1->smaOn); - - iColData1++; - } else { - code = tBlockDataAddColData(pBlockData, taosArrayGetSize(pBlockData->aIdx), &pColData); - if (code) goto _exit; - tColDataInit(pColData, pColData2->cid, pColData2->type, pColData2->smaOn); - - iColData2++; - } - } - - while (iColData1 < nColData1) { - code = tBlockDataAddColData(pBlockData, taosArrayGetSize(pBlockData->aIdx), &pColData); - if (code) goto _exit; - tColDataInit(pColData, pColData1->cid, pColData1->type, pColData1->smaOn); + ASSERT(pBlockData->suid == pBlockData1->suid); + ASSERT(pBlockData->uid == pBlockData1->uid); + ASSERT(pBlockData1->nRow > 0); + ASSERT(pBlockData2->nRow > 0); - iColData1++; - } + tBlockDataClear(pBlockData); - while (iColData2 < nColData2) { - code = tBlockDataAddColData(pBlockData, taosArrayGetSize(pBlockData->aIdx), &pColData); - if (code) goto _exit; - tColDataInit(pColData, pColData2->cid, pColData2->type, pColData2->smaOn); + TSDBROW row1 = tsdbRowFromBlockData(pBlockData1, 0); + TSDBROW row2 = tsdbRowFromBlockData(pBlockData2, 0); + TSDBROW *pRow1 = &row1; + TSDBROW *pRow2 = &row2; - iColData2++; - } - - // loop to merge - int32_t iRow1 = 0; - int32_t nRow1 = pBlockData1->nRow; - int32_t iRow2 = 0; - int32_t nRow2 = pBlockData2->nRow; - TSDBROW row1; - TSDBROW row2; - int32_t c; + while (pRow1 && pRow2) { + int32_t c = tsdbRowCmprFn(pRow1, pRow2); - while (iRow1 < nRow1 && iRow2 < nRow2) { - row1 = tsdbRowFromBlockData(pBlockData1, iRow1); - row2 = tsdbRowFromBlockData(pBlockData2, iRow2); - - c = tsdbKeyCmprFn(&TSDBROW_KEY(&row1), &TSDBROW_KEY(&row2)); if (c < 0) { - code = tBlockDataAppendRow(pBlockData, &row1, NULL); + code = tBlockDataAppendRow(pBlockData, pRow1, NULL, + pBlockData1->uid ? pBlockData1->uid : pBlockData1->aUid[pRow1->iRow]); if (code) goto _exit; - iRow1++; + + pRow1->iRow++; + if (pRow1->iRow < pBlockData1->nRow) { + *pRow1 = tsdbRowFromBlockData(pBlockData1, pRow1->iRow); + } else { + pRow1 = NULL; + } } else if (c > 0) { - code = tBlockDataAppendRow(pBlockData, &row2, NULL); + code = tBlockDataAppendRow(pBlockData, pRow2, NULL, + pBlockData2->uid ? pBlockData2->uid : pBlockData2->aUid[pRow2->iRow]); if (code) goto _exit; - iRow2++; + + pRow2->iRow++; + if (pRow2->iRow < pBlockData2->nRow) { + *pRow2 = tsdbRowFromBlockData(pBlockData2, pRow2->iRow); + } else { + pRow2 = NULL; + } } else { ASSERT(0); } } - while (iRow1 < nRow1) { - row1 = tsdbRowFromBlockData(pBlockData1, iRow1); - code = tBlockDataAppendRow(pBlockData, &row1, NULL); + while (pRow1) { + code = tBlockDataAppendRow(pBlockData, pRow1, NULL, + pBlockData1->uid ? pBlockData1->uid : pBlockData1->aUid[pRow1->iRow]); if (code) goto _exit; - iRow1++; + + pRow1->iRow++; + if (pRow1->iRow < pBlockData1->nRow) { + *pRow1 = tsdbRowFromBlockData(pBlockData1, pRow1->iRow); + } else { + pRow1 = NULL; + } } - while (iRow2 < nRow2) { - row2 = tsdbRowFromBlockData(pBlockData2, iRow2); - code = tBlockDataAppendRow(pBlockData, &row2, NULL); + while (pRow2) { + code = tBlockDataAppendRow(pBlockData, pRow2, NULL, + pBlockData2->uid ? pBlockData2->uid : pBlockData2->aUid[pRow2->iRow]); if (code) goto _exit; - iRow2++; + + pRow2->iRow++; + if (pRow2->iRow < pBlockData2->nRow) { + *pRow2 = tsdbRowFromBlockData(pBlockData2, pRow2->iRow); + } else { + pRow2 = NULL; + } } _exit: return code; } -int32_t tBlockDataCopy(SBlockData *pBlockDataSrc, SBlockData *pBlockDataDest) { - int32_t code = 0; - SColData *pColDataSrc; - SColData *pColDataDest; +int32_t tBlockDataCopy(SBlockData *pSrc, SBlockData *pDest) { + int32_t code = 0; + + tBlockDataClear(pDest); - ASSERT(pBlockDataSrc->nRow > 0); + ASSERT(pDest->suid == pSrc->suid); + ASSERT(pDest->uid == pSrc->uid); + ASSERT(taosArrayGetSize(pSrc->aIdx) == taosArrayGetSize(pDest->aIdx)); - tBlockDataReset(pBlockDataDest); + pDest->nRow = pSrc->nRow; + + if (pSrc->uid == 0) { + code = tRealloc((uint8_t **)&pDest->aUid, sizeof(int64_t) * pDest->nRow); + if (code) goto _exit; + memcpy(pDest->aUid, pSrc->aUid, sizeof(int64_t) * pDest->nRow); + } - pBlockDataDest->nRow = pBlockDataSrc->nRow; - // TSDBKEY - code = tRealloc((uint8_t **)&pBlockDataDest->aVersion, sizeof(int64_t) * pBlockDataSrc->nRow); + code = tRealloc((uint8_t **)&pDest->aVersion, sizeof(int64_t) * pDest->nRow); if (code) goto _exit; - code = tRealloc((uint8_t **)&pBlockDataDest->aTSKEY, sizeof(TSKEY) * pBlockDataSrc->nRow); + memcpy(pDest->aVersion, pSrc->aVersion, sizeof(int64_t) * pDest->nRow); + + code = tRealloc((uint8_t **)&pDest->aTSKEY, sizeof(TSKEY) * pDest->nRow); if (code) goto _exit; - memcpy(pBlockDataDest->aVersion, pBlockDataSrc->aVersion, sizeof(int64_t) * pBlockDataSrc->nRow); - memcpy(pBlockDataDest->aTSKEY, pBlockDataSrc->aTSKEY, sizeof(TSKEY) * pBlockDataSrc->nRow); + memcpy(pDest->aTSKEY, pSrc->aTSKEY, sizeof(TSKEY) * pDest->nRow); - // other - for (size_t iColData = 0; iColData < taosArrayGetSize(pBlockDataSrc->aIdx); iColData++) { - pColDataSrc = tBlockDataGetColDataByIdx(pBlockDataSrc, iColData); - code = tBlockDataAddColData(pBlockDataDest, iColData, &pColDataDest); - if (code) goto _exit; + for (int32_t iColData = 0; iColData < taosArrayGetSize(pSrc->aIdx); iColData++) { + SColData *pColSrc = tBlockDataGetColDataByIdx(pSrc, iColData); + SColData *pColDest = tBlockDataGetColDataByIdx(pDest, iColData); - code = tColDataCopy(pColDataSrc, pColDataDest); + ASSERT(pColSrc->cid == pColDest->cid); + ASSERT(pColSrc->type == pColDest->type); + + code = tColDataCopy(pColSrc, pColDest); if (code) goto _exit; } @@ -1416,57 +1500,249 @@ void tBlockDataGetColData(SBlockData *pBlockData, int16_t cid, SColData **ppColD *ppColData = NULL; } -int32_t tPutBlockData(uint8_t *p, SBlockData *pBlockData) { - int32_t n = 0; +int32_t tCmprBlockData(SBlockData *pBlockData, int8_t cmprAlg, uint8_t **ppOut, int32_t *szOut, uint8_t *aBuf[], + int32_t aBufN[]) { + int32_t code = 0; - n += tPutI32v(p ? p + n : p, pBlockData->nRow); - if (p) { - memcpy(p + n, pBlockData->aVersion, sizeof(int64_t) * pBlockData->nRow); + SDiskDataHdr hdr = {.delimiter = TSDB_FILE_DLMT, + .fmtVer = 0, + .suid = pBlockData->suid, + .uid = pBlockData->uid, + .nRow = pBlockData->nRow, + .cmprAlg = cmprAlg}; + + // encode ================= + // columns AND SBlockCol + aBufN[0] = 0; + for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) { + SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData); + + ASSERT(pColData->flag); + + if (pColData->flag == HAS_NONE) continue; + + SBlockCol blockCol = {.cid = pColData->cid, + .type = pColData->type, + .smaOn = pColData->smaOn, + .flag = pColData->flag, + .szOrigin = pColData->nData}; + + if (pColData->flag != HAS_NULL) { + code = tsdbCmprColData(pColData, cmprAlg, &blockCol, &aBuf[0], aBufN[0], &aBuf[2]); + if (code) goto _exit; + + blockCol.offset = aBufN[0]; + aBufN[0] = aBufN[0] + blockCol.szBitmap + blockCol.szOffset + blockCol.szValue + sizeof(TSCKSUM); + } + + code = tRealloc(&aBuf[1], hdr.szBlkCol + tPutBlockCol(NULL, &blockCol)); + if (code) goto _exit; + hdr.szBlkCol += tPutBlockCol(aBuf[1] + hdr.szBlkCol, &blockCol); } - n = n + sizeof(int64_t) * pBlockData->nRow; - if (p) { - memcpy(p + n, pBlockData->aTSKEY, sizeof(TSKEY) * pBlockData->nRow); + + aBufN[1] = 0; + if (hdr.szBlkCol > 0) { + aBufN[1] = hdr.szBlkCol + sizeof(TSCKSUM); + + code = tRealloc(&aBuf[1], aBufN[1]); + if (code) goto _exit; + + taosCalcChecksumAppend(0, aBuf[1], aBufN[1]); } - n = n + sizeof(TSKEY) * pBlockData->nRow; - int32_t nCol = taosArrayGetSize(pBlockData->aIdx); - n += tPutI32v(p ? p + n : p, nCol); - for (int32_t iCol = 0; iCol < nCol; iCol++) { - SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iCol); - n += tPutColData(p ? p + n : p, pColData); + // uid + version + tskey + aBufN[2] = 0; + if (pBlockData->uid == 0) { + code = tsdbCmprData((uint8_t *)pBlockData->aUid, sizeof(int64_t) * pBlockData->nRow, TSDB_DATA_TYPE_BIGINT, cmprAlg, + &aBuf[2], aBufN[2], &hdr.szUid, &aBuf[3]); + if (code) goto _exit; } + aBufN[2] += hdr.szUid; - return n; + code = tsdbCmprData((uint8_t *)pBlockData->aVersion, sizeof(int64_t) * pBlockData->nRow, TSDB_DATA_TYPE_BIGINT, + cmprAlg, &aBuf[2], aBufN[2], &hdr.szVer, &aBuf[3]); + if (code) goto _exit; + aBufN[2] += hdr.szVer; + + code = tsdbCmprData((uint8_t *)pBlockData->aTSKEY, sizeof(TSKEY) * pBlockData->nRow, TSDB_DATA_TYPE_TIMESTAMP, + cmprAlg, &aBuf[2], aBufN[2], &hdr.szKey, &aBuf[3]); + if (code) goto _exit; + aBufN[2] += hdr.szKey; + + aBufN[2] += sizeof(TSCKSUM); + code = tRealloc(&aBuf[2], aBufN[2]); + if (code) goto _exit; + + // hdr + aBufN[3] = tPutDiskDataHdr(NULL, &hdr); + code = tRealloc(&aBuf[3], aBufN[3]); + if (code) goto _exit; + tPutDiskDataHdr(aBuf[3], &hdr); + taosCalcChecksumAppend(taosCalcChecksum(0, aBuf[3], aBufN[3]), aBuf[2], aBufN[2]); + + // aggragate + if (ppOut) { + *szOut = aBufN[0] + aBufN[1] + aBufN[2] + aBufN[3]; + code = tRealloc(ppOut, *szOut); + if (code) goto _exit; + + memcpy(*ppOut, aBuf[3], aBufN[3]); + memcpy(*ppOut + aBufN[3], aBuf[2], aBufN[2]); + if (aBufN[1]) { + memcpy(*ppOut + aBufN[3] + aBufN[2], aBuf[1], aBufN[1]); + } + if (aBufN[0]) { + memcpy(*ppOut + aBufN[3] + aBufN[2] + aBufN[1], aBuf[0], aBufN[0]); + } + } + +_exit: + return code; } -int32_t tGetBlockData(uint8_t *p, SBlockData *pBlockData) { - int32_t n = 0; +int32_t tDecmprBlockData(uint8_t *pIn, int32_t szIn, SBlockData *pBlockData, uint8_t *aBuf[]) { + int32_t code = 0; + + tBlockDataClear(pBlockData); + + int32_t n = 0; + SDiskDataHdr hdr = {0}; + + // SDiskDataHdr + n += tGetDiskDataHdr(pIn + n, &hdr); + if (!taosCheckChecksumWhole(pIn, n + hdr.szUid + hdr.szVer + hdr.szKey + sizeof(TSCKSUM))) { + code = TSDB_CODE_FILE_CORRUPTED; + goto _exit; + } + ASSERT(hdr.delimiter == TSDB_FILE_DLMT); + + pBlockData->suid = hdr.suid; + pBlockData->uid = hdr.uid; + pBlockData->nRow = hdr.nRow; - tBlockDataReset(pBlockData); + // uid + if (hdr.uid == 0) { + ASSERT(hdr.szUid); + code = tsdbDecmprData(pIn + n, hdr.szUid, TSDB_DATA_TYPE_BIGINT, hdr.cmprAlg, (uint8_t **)&pBlockData->aUid, + sizeof(int64_t) * hdr.nRow, &aBuf[0]); + if (code) goto _exit; + } else { + ASSERT(!hdr.szUid); + } + n += hdr.szUid; - n += tGetI32v(p + n, &pBlockData->nRow); - pBlockData->aVersion = (int64_t *)(p + n); - n = n + sizeof(int64_t) * pBlockData->nRow; - pBlockData->aTSKEY = (TSKEY *)(p + n); - n = n + sizeof(TSKEY) * pBlockData->nRow; + // version + code = tsdbDecmprData(pIn + n, hdr.szVer, TSDB_DATA_TYPE_BIGINT, hdr.cmprAlg, (uint8_t **)&pBlockData->aVersion, + sizeof(int64_t) * hdr.nRow, &aBuf[0]); + if (code) goto _exit; + n += hdr.szVer; + + // TSKEY + code = tsdbDecmprData(pIn + n, hdr.szKey, TSDB_DATA_TYPE_TIMESTAMP, hdr.cmprAlg, (uint8_t **)&pBlockData->aTSKEY, + sizeof(TSKEY) * hdr.nRow, &aBuf[0]); + if (code) goto _exit; + n = n + hdr.szKey + sizeof(TSCKSUM); + + // loop to decode each column data + if (hdr.szBlkCol == 0) goto _exit; + + int32_t nt = 0; + while (nt < hdr.szBlkCol) { + SBlockCol blockCol = {0}; + nt += tGetBlockCol(pIn + n + nt, &blockCol); + ASSERT(nt <= hdr.szBlkCol); - int32_t nCol; - n += tGetI32v(p + n, &nCol); - for (int32_t iCol = 0; iCol < nCol; iCol++) { SColData *pColData; + code = tBlockDataAddColData(pBlockData, taosArrayGetSize(pBlockData->aIdx), &pColData); + if (code) goto _exit; - if (tBlockDataAddColData(pBlockData, iCol, &pColData)) return -1; - n += tGetColData(p + n, pColData); + tColDataInit(pColData, blockCol.cid, blockCol.type, blockCol.smaOn); + if (blockCol.flag == HAS_NULL) { + for (int32_t iRow = 0; iRow < hdr.nRow; iRow++) { + code = tColDataAppendValue(pColData, &COL_VAL_NULL(blockCol.cid, blockCol.type)); + if (code) goto _exit; + } + } else { + code = tsdbDecmprColData(pIn + n + hdr.szBlkCol + sizeof(TSCKSUM) + blockCol.offset, &blockCol, hdr.cmprAlg, + hdr.nRow, pColData, &aBuf[0]); + if (code) goto _exit; + } } +_exit: + return code; +} + +// SDiskDataHdr ============================== +int32_t tPutDiskDataHdr(uint8_t *p, void *ph) { + int32_t n = 0; + SDiskDataHdr *pHdr = (SDiskDataHdr *)ph; + + n += tPutU32(p ? p + n : p, pHdr->delimiter); + n += tPutU32v(p ? p + n : p, pHdr->fmtVer); + n += tPutI64(p ? p + n : p, pHdr->suid); + n += tPutI64(p ? p + n : p, pHdr->uid); + n += tPutI32v(p ? p + n : p, pHdr->szUid); + n += tPutI32v(p ? p + n : p, pHdr->szVer); + n += tPutI32v(p ? p + n : p, pHdr->szKey); + n += tPutI32v(p ? p + n : p, pHdr->szBlkCol); + n += tPutI32v(p ? p + n : p, pHdr->nRow); + n += tPutI8(p ? p + n : p, pHdr->cmprAlg); + + return n; +} + +int32_t tGetDiskDataHdr(uint8_t *p, void *ph) { + int32_t n = 0; + SDiskDataHdr *pHdr = (SDiskDataHdr *)ph; + + n += tGetU32(p + n, &pHdr->delimiter); + n += tGetU32v(p + n, &pHdr->fmtVer); + n += tGetI64(p + n, &pHdr->suid); + n += tGetI64(p + n, &pHdr->uid); + n += tGetI32v(p + n, &pHdr->szUid); + n += tGetI32v(p + n, &pHdr->szVer); + n += tGetI32v(p + n, &pHdr->szKey); + n += tGetI32v(p + n, &pHdr->szBlkCol); + n += tGetI32v(p + n, &pHdr->nRow); + n += tGetI8(p + n, &pHdr->cmprAlg); + return n; } // ALGORITHM ============================== +int32_t tPutColumnDataAgg(uint8_t *p, SColumnDataAgg *pColAgg) { + int32_t n = 0; + + n += tPutI16v(p ? p + n : p, pColAgg->colId); + n += tPutI16v(p ? p + n : p, pColAgg->numOfNull); + n += tPutI64(p ? p + n : p, pColAgg->sum); + n += tPutI64(p ? p + n : p, pColAgg->max); + n += tPutI64(p ? p + n : p, pColAgg->min); + + return n; +} + +int32_t tGetColumnDataAgg(uint8_t *p, SColumnDataAgg *pColAgg) { + int32_t n = 0; + + n += tGetI16v(p + n, &pColAgg->colId); + n += tGetI16v(p + n, &pColAgg->numOfNull); + n += tGetI64(p + n, &pColAgg->sum); + n += tGetI64(p + n, &pColAgg->max); + n += tGetI64(p + n, &pColAgg->min); + + return n; +} + void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { SColVal colVal; SColVal *pColVal = &colVal; + memset(pColAgg, 0, sizeof(*pColAgg)); + bool minAssigned = false; + bool maxAssigned = false; + *pColAgg = (SColumnDataAgg){.colId = pColData->cid}; for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) { tColDataGetValue(pColData, iVal, pColVal); @@ -1481,72 +1757,86 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { break; case TSDB_DATA_TYPE_TINYINT: { pColAgg->sum += colVal.value.i8; - if (pColAgg->min > colVal.value.i8) { + if (!minAssigned || pColAgg->min > colVal.value.i8) { pColAgg->min = colVal.value.i8; + minAssigned = true; } - if (pColAgg->max < colVal.value.i8) { + if (!maxAssigned || pColAgg->max < colVal.value.i8) { pColAgg->max = colVal.value.i8; + maxAssigned = true; } break; } case TSDB_DATA_TYPE_SMALLINT: { pColAgg->sum += colVal.value.i16; - if (pColAgg->min > colVal.value.i16) { + if (!minAssigned || pColAgg->min > colVal.value.i16) { pColAgg->min = colVal.value.i16; + minAssigned = true; } - if (pColAgg->max < colVal.value.i16) { + if (!maxAssigned || pColAgg->max < colVal.value.i16) { pColAgg->max = colVal.value.i16; + maxAssigned = true; } break; } case TSDB_DATA_TYPE_INT: { pColAgg->sum += colVal.value.i32; - if (pColAgg->min > colVal.value.i32) { + if (!minAssigned || pColAgg->min > colVal.value.i32) { pColAgg->min = colVal.value.i32; + minAssigned = true; } - if (pColAgg->max < colVal.value.i32) { + if (!maxAssigned || pColAgg->max < colVal.value.i32) { pColAgg->max = colVal.value.i32; + maxAssigned = true; } break; } case TSDB_DATA_TYPE_BIGINT: { pColAgg->sum += colVal.value.i64; - if (pColAgg->min > colVal.value.i64) { + if (!minAssigned || pColAgg->min > colVal.value.i64) { pColAgg->min = colVal.value.i64; + minAssigned = true; } - if (pColAgg->max < colVal.value.i64) { + if (!maxAssigned || pColAgg->max < colVal.value.i64) { pColAgg->max = colVal.value.i64; + maxAssigned = true; } break; } case TSDB_DATA_TYPE_FLOAT: { - *(double*)(&pColAgg->sum) += colVal.value.f; - if (*(double*)(&pColAgg->min) > colVal.value.f) { - *(double*)(&pColAgg->min) = colVal.value.f; + *(double *)(&pColAgg->sum) += colVal.value.f; + if (!minAssigned || *(double *)(&pColAgg->min) > colVal.value.f) { + *(double *)(&pColAgg->min) = colVal.value.f; + minAssigned = true; } - if (*(double*)(&pColAgg->max) < colVal.value.f) { - *(double*)(&pColAgg->max) = colVal.value.f; + if (!maxAssigned || *(double *)(&pColAgg->max) < colVal.value.f) { + *(double *)(&pColAgg->max) = colVal.value.f; + maxAssigned = true; } break; } case TSDB_DATA_TYPE_DOUBLE: { - *(double*)(&pColAgg->sum) += colVal.value.d; - if (*(double*)(&pColAgg->min) > colVal.value.d) { - *(double*)(&pColAgg->min) = colVal.value.d; + *(double *)(&pColAgg->sum) += colVal.value.d; + if (!minAssigned || *(double *)(&pColAgg->min) > colVal.value.d) { + *(double *)(&pColAgg->min) = colVal.value.d; + minAssigned = true; } - if (*(double*)(&pColAgg->max) < colVal.value.d) { - *(double*)(&pColAgg->max) = colVal.value.d; + if (!maxAssigned || *(double *)(&pColAgg->max) < colVal.value.d) { + *(double *)(&pColAgg->max) = colVal.value.d; + maxAssigned = true; } break; } case TSDB_DATA_TYPE_VARCHAR: break; case TSDB_DATA_TYPE_TIMESTAMP: { - if (pColAgg->min > colVal.value.i64) { + if (!minAssigned || pColAgg->min > colVal.value.i64) { pColAgg->min = colVal.value.i64; + minAssigned = true; } - if (pColAgg->max < colVal.value.i64) { + if (!maxAssigned || pColAgg->max < colVal.value.i64) { pColAgg->max = colVal.value.i64; + maxAssigned = true; } break; } @@ -1554,41 +1844,49 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { break; case TSDB_DATA_TYPE_UTINYINT: { pColAgg->sum += colVal.value.u8; - if (pColAgg->min > colVal.value.u8) { + if (!minAssigned || pColAgg->min > colVal.value.u8) { pColAgg->min = colVal.value.u8; + minAssigned = true; } - if (pColAgg->max < colVal.value.u8) { + if (!maxAssigned || pColAgg->max < colVal.value.u8) { pColAgg->max = colVal.value.u8; + maxAssigned = true; } break; } case TSDB_DATA_TYPE_USMALLINT: { pColAgg->sum += colVal.value.u16; - if (pColAgg->min > colVal.value.u16) { + if (!minAssigned || pColAgg->min > colVal.value.u16) { pColAgg->min = colVal.value.u16; + minAssigned = true; } - if (pColAgg->max < colVal.value.u16) { + if (!maxAssigned || pColAgg->max < colVal.value.u16) { pColAgg->max = colVal.value.u16; + maxAssigned = true; } break; } case TSDB_DATA_TYPE_UINT: { pColAgg->sum += colVal.value.u32; - if (pColAgg->min > colVal.value.u32) { + if (!minAssigned || pColAgg->min > colVal.value.u32) { pColAgg->min = colVal.value.u32; + minAssigned = true; } - if (pColAgg->max < colVal.value.u32) { + if (!minAssigned || pColAgg->max < colVal.value.u32) { pColAgg->max = colVal.value.u32; + maxAssigned = true; } break; } case TSDB_DATA_TYPE_UBIGINT: { pColAgg->sum += colVal.value.u64; - if (pColAgg->min > colVal.value.u64) { + if (!minAssigned || pColAgg->min > colVal.value.u64) { pColAgg->min = colVal.value.u64; + minAssigned = true; } - if (pColAgg->max < colVal.value.u64) { + if (!maxAssigned || pColAgg->max < colVal.value.u64) { pColAgg->max = colVal.value.u64; + maxAssigned = true; } break; } @@ -1608,3 +1906,268 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } } } + +int32_t tsdbCmprData(uint8_t *pIn, int32_t szIn, int8_t type, int8_t cmprAlg, uint8_t **ppOut, int32_t nOut, + int32_t *szOut, uint8_t **ppBuf) { + int32_t code = 0; + + ASSERT(szIn > 0 && ppOut); + + if (cmprAlg == NO_COMPRESSION) { + code = tRealloc(ppOut, nOut + szIn); + if (code) goto _exit; + + memcpy(*ppOut + nOut, pIn, szIn); + *szOut = szIn; + } else { + int32_t size = szIn + COMP_OVERFLOW_BYTES; + + code = tRealloc(ppOut, nOut + size); + if (code) goto _exit; + + if (cmprAlg == TWO_STAGE_COMP) { + ASSERT(ppBuf); + code = tRealloc(ppBuf, size); + if (code) goto _exit; + } + + *szOut = + tDataTypes[type].compFunc(pIn, szIn, szIn / tDataTypes[type].bytes, *ppOut + nOut, size, cmprAlg, *ppBuf, size); + if (*szOut <= 0) { + code = TSDB_CODE_COMPRESS_ERROR; + goto _exit; + } + } + +_exit: + return code; +} + +int32_t tsdbDecmprData(uint8_t *pIn, int32_t szIn, int8_t type, int8_t cmprAlg, uint8_t **ppOut, int32_t szOut, + uint8_t **ppBuf) { + int32_t code = 0; + + code = tRealloc(ppOut, szOut); + if (code) goto _exit; + + if (cmprAlg == NO_COMPRESSION) { + ASSERT(szIn == szOut); + memcpy(*ppOut, pIn, szOut); + } else { + if (cmprAlg == TWO_STAGE_COMP) { + code = tRealloc(ppBuf, szOut + COMP_OVERFLOW_BYTES); + if (code) goto _exit; + } + + int32_t size = tDataTypes[type].decompFunc(pIn, szIn, szOut / tDataTypes[type].bytes, *ppOut, szOut, cmprAlg, + *ppBuf, szOut + COMP_OVERFLOW_BYTES); + if (size <= 0) { + code = TSDB_CODE_COMPRESS_ERROR; + goto _exit; + } + + ASSERT(size == szOut); + } + +_exit: + return code; +} + +int32_t tsdbCmprColData(SColData *pColData, int8_t cmprAlg, SBlockCol *pBlockCol, uint8_t **ppOut, int32_t nOut, + uint8_t **ppBuf) { + int32_t code = 0; + + ASSERT(pColData->flag && (pColData->flag != HAS_NONE) && (pColData->flag != HAS_NULL)); + + pBlockCol->szBitmap = 0; + pBlockCol->szOffset = 0; + pBlockCol->szValue = 0; + + int32_t size = 0; + // bitmap + if (pColData->flag != HAS_VALUE) { + uint8_t *pBitMap = pColData->pBitMap; + int32_t szBitMap = BIT2_SIZE(pColData->nVal); + + // BIT2 to BIT1 + if (pColData->flag != (HAS_VALUE | HAS_NULL | HAS_NONE)) { + szBitMap = BIT1_SIZE(pColData->nVal); + pBitMap = taosMemoryCalloc(1, szBitMap); + if (pBitMap == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + + for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) { + uint8_t v = GET_BIT2(pColData->pBitMap, iVal); + switch (pColData->flag) { + case (HAS_NULL | HAS_NONE): + SET_BIT1(pBitMap, iVal, v); + break; + case (HAS_VALUE | HAS_NONE): + if (v) { + SET_BIT1(pBitMap, iVal, 1); + } else { + SET_BIT1(pBitMap, iVal, 0); + } + break; + case (HAS_VALUE | HAS_NULL): + SET_BIT1(pBitMap, iVal, v - 1); + break; + default: + ASSERT(0); + } + } + } + + code = tsdbCmprData(pBitMap, szBitMap, TSDB_DATA_TYPE_TINYINT, cmprAlg, ppOut, nOut + size, &pBlockCol->szBitmap, + ppBuf); + if (code) goto _exit; + + if (pColData->flag != (HAS_VALUE | HAS_NULL | HAS_NONE)) { + taosMemoryFree(pBitMap); + } + } + size += pBlockCol->szBitmap; + + // offset + if (IS_VAR_DATA_TYPE(pColData->type)) { + code = tsdbCmprData((uint8_t *)pColData->aOffset, sizeof(int32_t) * pColData->nVal, TSDB_DATA_TYPE_INT, cmprAlg, + ppOut, nOut + size, &pBlockCol->szOffset, ppBuf); + if (code) goto _exit; + } + size += pBlockCol->szOffset; + + // value + if (pColData->flag != (HAS_NULL | HAS_NONE)) { + code = tsdbCmprData((uint8_t *)pColData->pData, pColData->nData, pColData->type, cmprAlg, ppOut, nOut + size, + &pBlockCol->szValue, ppBuf); + if (code) goto _exit; + } + size += pBlockCol->szValue; + + // checksum + size += sizeof(TSCKSUM); + code = tRealloc(ppOut, nOut + size); + if (code) goto _exit; + taosCalcChecksumAppend(0, *ppOut + nOut, size); + +_exit: + return code; +} + +int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, int32_t nVal, SColData *pColData, + uint8_t **ppBuf) { + int32_t code = 0; + + int32_t size = pBlockCol->szBitmap + pBlockCol->szOffset + pBlockCol->szValue + sizeof(TSCKSUM); + if (!taosCheckChecksumWhole(pIn, size)) { + code = TSDB_CODE_FILE_CORRUPTED; + goto _exit; + } + + ASSERT(pColData->cid == pBlockCol->cid); + ASSERT(pColData->type == pBlockCol->type); + pColData->smaOn = pBlockCol->smaOn; + pColData->flag = pBlockCol->flag; + pColData->nVal = nVal; + pColData->nData = pBlockCol->szOrigin; + + uint8_t *p = pIn; + // bitmap + if (pBlockCol->szBitmap) { + if (pBlockCol->flag != (HAS_VALUE | HAS_NULL | HAS_NONE)) { + uint8_t *pBitMap = NULL; + code = tsdbDecmprData(p, pBlockCol->szBitmap, TSDB_DATA_TYPE_TINYINT, cmprAlg, &pBitMap, + BIT1_SIZE(pColData->nVal), ppBuf); + if (code) goto _exit; + + code = tRealloc(&pColData->pBitMap, BIT2_SIZE(pColData->nVal)); + if (code) { + tFree(pBitMap); + goto _exit; + } + + // BIT1 to BIT2 + for (int32_t iVal = 0; iVal < nVal; iVal++) { + uint8_t v = GET_BIT1(pBitMap, iVal); + switch (pBlockCol->flag) { + case (HAS_NULL | HAS_NONE): + SET_BIT2(pColData->pBitMap, iVal, v); + break; + case (HAS_VALUE | HAS_NONE): + if (v) { + SET_BIT2(pColData->pBitMap, iVal, 2); + } else { + SET_BIT2(pColData->pBitMap, iVal, 0); + } + break; + case (HAS_VALUE | HAS_NULL): + SET_BIT2(pColData->pBitMap, iVal, v + 1); + break; + default: + ASSERT(0); + } + } + + tFree(pBitMap); + } else { + code = tsdbDecmprData(p, pBlockCol->szBitmap, TSDB_DATA_TYPE_TINYINT, cmprAlg, &pColData->pBitMap, + BIT2_SIZE(pColData->nVal), ppBuf); + if (code) goto _exit; + } + } + p += pBlockCol->szBitmap; + + // offset + if (pBlockCol->szOffset) { + code = tsdbDecmprData(p, pBlockCol->szOffset, TSDB_DATA_TYPE_INT, cmprAlg, (uint8_t **)&pColData->aOffset, + sizeof(int32_t) * pColData->nVal, ppBuf); + if (code) goto _exit; + } + p += pBlockCol->szOffset; + + // value + if (pBlockCol->szValue) { + code = tsdbDecmprData(p, pBlockCol->szValue, pColData->type, cmprAlg, &pColData->pData, pColData->nData, ppBuf); + if (code) goto _exit; + } + p += pBlockCol->szValue; + +_exit: + return code; +} + +int32_t tsdbReadAndCheck(TdFilePtr pFD, int64_t offset, uint8_t **ppOut, int32_t size, int8_t toCheck) { + int32_t code = 0; + + // alloc + code = tRealloc(ppOut, size); + if (code) goto _exit; + + // seek + int64_t n = taosLSeekFile(pFD, offset, SEEK_SET); + if (n < 0) { + code = TAOS_SYSTEM_ERROR(errno); + goto _exit; + } + + // read + n = taosReadFile(pFD, *ppOut, size); + if (n < 0) { + code = TAOS_SYSTEM_ERROR(errno); + goto _exit; + } else if (n < size) { + code = TSDB_CODE_FILE_CORRUPTED; + goto _exit; + } + + // check + if (toCheck && !taosCheckChecksumWhole(*ppOut, size)) { + code = TSDB_CODE_FILE_CORRUPTED; + goto _exit; + } + +_exit: + return code; +} diff --git a/source/dnode/vnode/src/vnd/vnodeBufPool.c b/source/dnode/vnode/src/vnd/vnodeBufPool.c index 0623b3bd1075d34c35c1cfce1ac94218e0f59320..5a22114ab42206f0f63a4c41a3d4c53c438ff68b 100644 --- a/source/dnode/vnode/src/vnd/vnodeBufPool.c +++ b/source/dnode/vnode/src/vnd/vnodeBufPool.c @@ -78,7 +78,7 @@ void vnodeBufPoolReset(SVBufPool *pPool) { void *vnodeBufPoolMalloc(SVBufPool *pPool, int size) { SVBufPoolNode *pNode; void *p; - + taosThreadSpinLock(&pPool->lock); if (pPool->node.size >= pPool->ptr - pPool->node.data + size) { // allocate from the anchor node p = pPool->ptr; @@ -89,6 +89,7 @@ void *vnodeBufPoolMalloc(SVBufPool *pPool, int size) { pNode = taosMemoryMalloc(sizeof(*pNode) + size); if (pNode == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; + taosThreadSpinUnlock(&pPool->lock); return NULL; } @@ -101,7 +102,7 @@ void *vnodeBufPoolMalloc(SVBufPool *pPool, int size) { pPool->size = pPool->size + sizeof(*pNode) + size; } - + taosThreadSpinUnlock(&pPool->lock); return p; } @@ -129,6 +130,12 @@ static int vnodeBufPoolCreate(SVnode *pVnode, int64_t size, SVBufPool **ppPool) return -1; } + if (taosThreadSpinInit(&pPool->lock, 0) != 0) { + taosMemoryFree(pPool); + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + pPool->next = NULL; pPool->pVnode = pVnode; pPool->nRef = 0; @@ -145,6 +152,7 @@ static int vnodeBufPoolCreate(SVnode *pVnode, int64_t size, SVBufPool **ppPool) static int vnodeBufPoolDestroy(SVBufPool *pPool) { vnodeBufPoolReset(pPool); + taosThreadSpinDestroy(&pPool->lock); taosMemoryFree(pPool); return 0; } diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c index 4418ce20e88b8c461e55fbe0d7b4a8348e032379..580ab8bc93cac3a5057821f238cf85fc1011fa38 100644 --- a/source/dnode/vnode/src/vnd/vnodeCfg.c +++ b/source/dnode/vnode/src/vnd/vnodeCfg.c @@ -117,6 +117,7 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) { if (tjsonAddIntegerToObject(pJson, "vndStats.ctables", pCfg->vndStats.numOfCTables) < 0) return -1; if (tjsonAddIntegerToObject(pJson, "vndStats.ntables", pCfg->vndStats.numOfNTables) < 0) return -1; if (tjsonAddIntegerToObject(pJson, "vndStats.timeseries", pCfg->vndStats.numOfTimeSeries) < 0) return -1; + if (tjsonAddIntegerToObject(pJson, "vndStats.ntimeseries", pCfg->vndStats.numOfNTimeSeries) < 0) return -1; SJson *pNodeInfoArr = tjsonCreateArray(); tjsonAddItemToObject(pJson, "syncCfg.nodeInfo", pNodeInfoArr); @@ -224,6 +225,8 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { if (code < 0) return -1; tjsonGetNumberValue(pJson, "vndStats.timeseries", pCfg->vndStats.numOfTimeSeries, code); if (code < 0) return -1; + tjsonGetNumberValue(pJson, "vndStats.ntimeseries", pCfg->vndStats.numOfNTimeSeries, code); + if (code < 0) return -1; SJson *pNodeInfoArr = tjsonGetObjectItem(pJson, "syncCfg.nodeInfo"); int arraySize = tjsonGetArraySize(pNodeInfoArr); diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index c8dc07af0a040bc7b436ee73b6743e8379c9de23..8c73499229b89d67edbc21f86ff7f76b570a4275 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -220,9 +220,6 @@ int vnodeCommit(SVnode *pVnode) { vInfo("vgId:%d, start to commit, commit ID:%" PRId64 " version:%" PRId64, TD_VID(pVnode), pVnode->state.commitID, pVnode->state.applied); - vnodeBufPoolUnRef(pVnode->inUse); - pVnode->inUse = NULL; - pVnode->state.commitTerm = pVnode->state.applyTerm; // save info @@ -239,7 +236,13 @@ int vnodeCommit(SVnode *pVnode) { // preCommit // smaSyncPreCommit(pVnode->pSma); - smaAsyncPreCommit(pVnode->pSma); + if(smaAsyncPreCommit(pVnode->pSma) < 0){ + ASSERT(0); + return -1; + } + + vnodeBufPoolUnRef(pVnode->inUse); + pVnode->inUse = NULL; // commit each sub-system if (metaCommit(pVnode->pMeta) < 0) { @@ -248,7 +251,10 @@ int vnodeCommit(SVnode *pVnode) { } if (VND_IS_RSMA(pVnode)) { - smaAsyncCommit(pVnode->pSma); + if (smaAsyncCommit(pVnode->pSma) < 0) { + ASSERT(0); + return -1; + } if (tsdbCommit(VND_RSMA0(pVnode)) < 0) { ASSERT(0); @@ -285,7 +291,10 @@ int vnodeCommit(SVnode *pVnode) { // postCommit // smaSyncPostCommit(pVnode->pSma); - smaAsyncPostCommit(pVnode->pSma); + if (smaAsyncPostCommit(pVnode->pSma) < 0) { + ASSERT(0); + return -1; + } // apply the commit (TODO) walEndSnapshot(pVnode->pWal); diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index d55f1796ad6a926e1b63d96d3f99234c27ba9a3c..8d799e919d1e4c06cfec6438d7a4a34fc336993d 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -424,6 +424,25 @@ int32_t vnodeGetCtbIdList(SVnode *pVnode, int64_t suid, SArray *list) { return TSDB_CODE_SUCCESS; } +int32_t vnodeGetStbIdList(SVnode* pVnode, int64_t suid, SArray* list) { + SMStbCursor* pCur = metaOpenStbCursor(pVnode->pMeta, suid); + if (!pCur) { + return TSDB_CODE_FAILED; + } + + while (1) { + tb_uid_t id = metaStbCursorNext(pCur); + if (id == 0) { + break; + } + + taosArrayPush(list, &id); + } + + metaCloseStbCursor(pCur); + return TSDB_CODE_SUCCESS; +} + int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) { SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, suid); if (!pCur) { diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index d5c5e186681d129d4e3b94dad53939637d22cdaf..85feecff1a0247790eec14b503b7dda6247a6c87 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -196,36 +196,42 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp break; /* TQ */ case TDMT_VND_MQ_VG_CHANGE: - if (tqProcessVgChangeReq(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), + if (tqProcessVgChangeReq(pVnode->pTq, version, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), pMsg->contLen - sizeof(SMsgHead)) < 0) { goto _err; } break; case TDMT_VND_MQ_VG_DELETE: - if (tqProcessVgDeleteReq(pVnode->pTq, pMsg->pCont, pMsg->contLen) < 0) { + if (tqProcessVgDeleteReq(pVnode->pTq, version, pMsg->pCont, pMsg->contLen) < 0) { goto _err; } break; case TDMT_VND_MQ_COMMIT_OFFSET: - if (tqProcessOffsetCommitReq(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), - pMsg->contLen - sizeof(SMsgHead), version) < 0) { + if (tqProcessOffsetCommitReq(pVnode->pTq, version, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), + pMsg->contLen - sizeof(SMsgHead)) < 0) { goto _err; } break; - case TDMT_VND_CHECK_ALTER_INFO: - if (tqProcessCheckAlterInfoReq(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), - pMsg->contLen - sizeof(SMsgHead)) < 0) { + case TDMT_VND_ADD_CHECK_INFO: + if (tqProcessAddCheckInfoReq(pVnode->pTq, version, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), + pMsg->contLen - sizeof(SMsgHead)) < 0) { + goto _err; + } + break; + case TDMT_VND_DELETE_CHECK_INFO: + if (tqProcessDelCheckInfoReq(pVnode->pTq, version, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), + pMsg->contLen - sizeof(SMsgHead)) < 0) { goto _err; } break; case TDMT_STREAM_TASK_DEPLOY: { - if (tqProcessTaskDeployReq(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), + if (tqProcessTaskDeployReq(pVnode->pTq, version, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), pMsg->contLen - sizeof(SMsgHead)) < 0) { goto _err; } } break; case TDMT_STREAM_TASK_DROP: { - if (tqProcessTaskDropReq(pVnode->pTq, pMsg->pCont, pMsg->contLen) < 0) { + if (tqProcessTaskDropReq(pVnode->pTq, version, pMsg->pCont, pMsg->contLen) < 0) { goto _err; } } break; @@ -247,6 +253,8 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp vTrace("vgId:%d, process %s request success, index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), version); + walApplyVer(pVnode->pWal, version); + if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, version) < 0) { vError("vgId:%d, failed to push msg to TQ since %s", TD_VID(pVnode), tstrerror(terrno)); return -1; @@ -281,7 +289,7 @@ int32_t vnodePreprocessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { vTrace("message in vnode query queue is processing"); - if ((pMsg->msgType == TDMT_SCH_QUERY) && !vnodeIsLeader(pVnode)) { + if ((pMsg->msgType == TDMT_SCH_QUERY) && !vnodeIsReadyForRead(pVnode)) { vnodeRedirectRpcMsg(pVnode, pMsg); return 0; } @@ -293,8 +301,6 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg, 0); case TDMT_SCH_QUERY_CONTINUE: return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0); - case TDMT_VND_FETCH_RSMA: - return smaProcessFetch(pVnode->pSma, pMsg); default: vError("unknown msg type:%d in query queue", pMsg->msgType); return TSDB_CODE_VND_APP_ERROR; @@ -305,7 +311,7 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { vTrace("vgId:%d, msg:%p in fetch queue is processing", pVnode->config.vgId, pMsg); if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META || pMsg->msgType == TDMT_VND_TABLE_CFG || pMsg->msgType == TDMT_VND_BATCH_META) && - !vnodeIsLeader(pVnode)) { + !vnodeIsReadyForRead(pVnode)) { vnodeRedirectRpcMsg(pVnode, pMsg); return 0; } @@ -362,6 +368,10 @@ void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) { } void vnodeUpdateMetaRsp(SVnode *pVnode, STableMetaRsp *pMetaRsp) { + if (NULL == pMetaRsp) { + return; + } + strcpy(pMetaRsp->dbFName, pVnode->config.dbname); pMetaRsp->dbId = pVnode->config.dbId; pMetaRsp->vgId = TD_VID(pVnode); @@ -372,14 +382,14 @@ static int32_t vnodeProcessTrimReq(SVnode *pVnode, int64_t version, void *pReq, int32_t code = 0; SVTrimDbReq trimReq = {0}; - vInfo("vgId:%d, trim vnode request will be processed, time:%d", pVnode->config.vgId, trimReq.timestamp); - // decode if (tDeserializeSVTrimDbReq(pReq, len, &trimReq) != 0) { code = TSDB_CODE_INVALID_MSG; goto _exit; } + vInfo("vgId:%d, trim vnode request will be processed, time:%d", pVnode->config.vgId, trimReq.timestamp); + // process code = tsdbDoRetention(pVnode->pTsdb, trimReq.timestamp); if (code) goto _exit; @@ -506,7 +516,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR } // do create table - if (metaCreateTable(pVnode->pMeta, version, pCreateReq) < 0) { + if (metaCreateTable(pVnode->pMeta, version, pCreateReq, &cRsp.pMeta) < 0) { if (pCreateReq->flags & TD_CREATE_IF_NOT_EXISTS && terrno == TSDB_CODE_TDB_TABLE_ALREADY_EXIST) { cRsp.code = TSDB_CODE_SUCCESS; } else { @@ -516,13 +526,16 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR cRsp.code = TSDB_CODE_SUCCESS; tdFetchTbUidList(pVnode->pSma, &pStore, pCreateReq->ctb.suid, pCreateReq->uid); taosArrayPush(tbUids, &pCreateReq->uid); + vnodeUpdateMetaRsp(pVnode, cRsp.pMeta); } taosArrayPush(rsp.pArray, &cRsp); } tqUpdateTbUidList(pVnode->pTq, tbUids, true); - tdUpdateTbUidList(pVnode->pSma, pStore); + if (tdUpdateTbUidList(pVnode->pSma, pStore) < 0) { + goto _exit; + } tdUidStoreFree(pStore); // prepare rsp @@ -542,7 +555,7 @@ _exit: pCreateReq = req.pReqs + iReq; taosArrayDestroy(pCreateReq->ctb.tagName); } - taosArrayDestroy(rsp.pArray); + taosArrayDestroyEx(rsp.pArray, tFreeSVCreateTbRsp); taosArrayDestroy(tbUids); tDecoderClear(&decoder); tEncoderClear(&encoder); @@ -854,7 +867,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq goto _exit; } - if (metaCreateTable(pVnode->pMeta, version, &createTbReq) < 0) { + if (metaCreateTable(pVnode->pMeta, version, &createTbReq, &submitBlkRsp.pMeta) < 0) { if (terrno != TSDB_CODE_TDB_TABLE_ALREADY_EXIST) { submitBlkRsp.code = terrno; pRsp->code = terrno; @@ -862,12 +875,16 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq taosArrayDestroy(createTbReq.ctb.tagName); goto _exit; } + } else { + if (NULL != submitBlkRsp.pMeta) { + vnodeUpdateMetaRsp(pVnode, submitBlkRsp.pMeta); + } } taosArrayPush(newTbUids, &createTbReq.uid); submitBlkRsp.uid = createTbReq.uid; submitBlkRsp.tblFName = taosMemoryMalloc(strlen(pVnode->config.dbname) + strlen(createTbReq.name) + 2); - sprintf(submitBlkRsp.tblFName, "%s.", pVnode->config.dbname); + sprintf(submitBlkRsp.tblFName, "%s.%s", pVnode->config.dbname, createTbReq.name); msgIter.uid = createTbReq.uid; if (createTbReq.type == TSDB_CHILD_TABLE) { @@ -905,11 +922,7 @@ _exit: tEncodeSSubmitRsp(&encoder, &submitRsp); tEncoderClear(&encoder); - for (int32_t i = 0; i < taosArrayGetSize(submitRsp.pArray); i++) { - taosMemoryFree(((SSubmitBlkRsp *)taosArrayGet(submitRsp.pArray, i))[0].tblFName); - } - - taosArrayDestroy(submitRsp.pArray); + taosArrayDestroyEx(submitRsp.pArray, tFreeSSubmitBlkRsp); // TODO: the partial success scenario and the error case // => If partial success, extract the success submitted rows and reconstruct a new submit msg, and push to level diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index 9703ed27ae8e260c813eec35f17be51cd2113d20..65d4e9aaf10cf9c0ee723eb70b94166cde1fba0d 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -764,6 +764,8 @@ void vnodeSyncStart(SVnode *pVnode) { void vnodeSyncClose(SVnode *pVnode) { syncStop(pVnode->sync); } +bool vnodeIsRoleLeader(SVnode *pVnode) { return syncGetMyRole(pVnode->sync) == TAOS_SYNC_STATE_LEADER; } + bool vnodeIsLeader(SVnode *pVnode) { if (!syncIsReady(pVnode->sync)) { vDebug("vgId:%d, vnode not ready, state:%s, restore:%d", pVnode->config.vgId, syncGetMyRoleStr(pVnode->sync), @@ -779,3 +781,17 @@ bool vnodeIsLeader(SVnode *pVnode) { return true; } + +bool vnodeIsReadyForRead(SVnode *pVnode) { + if (syncIsReady(pVnode->sync)) { + return true; + } + + if (syncIsReadyForRead(pVnode->sync)) { + return true; + } + + vDebug("vgId:%d, vnode not ready for read, state:%s, last:%ld, cmt:%ld", pVnode->config.vgId, + syncGetMyRoleStr(pVnode->sync), syncGetLastIndex(pVnode->sync), syncGetCommitIndex(pVnode->sync)); + return false; +} diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index 777dcd0592ae69de003d5df0d1d9d2592302d195..9b62581051daac9c232409c0cb30d379e3a4d596 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -188,7 +188,7 @@ typedef struct SCtgTbCache { typedef struct SCtgVgCache { SRWLatch vgLock; - SDBVgInfo *vgInfo; + SDBVgInfo *vgInfo; } SCtgVgCache; typedef struct SCtgDBCache { @@ -224,7 +224,7 @@ typedef struct SCtgUserAuth { } SCtgUserAuth; typedef struct SCatalog { - uint64_t clusterId; + uint64_t clusterId; SHashObj *userCache; //key:user, value:SCtgUserAuth SHashObj *dbCache; //key:dbname, value:SCtgDBCache SCtgRentMgmt dbRent; @@ -253,9 +253,9 @@ typedef struct SCtgJob { int32_t jobResCode; int32_t taskIdx; SRWLatch taskLock; - + uint64_t queryId; - SCatalog* pCtg; + SCatalog* pCtg; SRequestConnInfo conn; void* userParam; catalogCallback userFp; @@ -279,7 +279,7 @@ typedef struct SCtgMsgCtx { void* lastOut; void* out; char* target; - SHashObj* pBatchs; + SHashObj* pBatchs; } SCtgMsgCtx; @@ -364,7 +364,7 @@ typedef struct SCtgCacheStat { uint64_t numOfMetaHit; uint64_t numOfMetaMiss; uint64_t numOfIndexHit; - uint64_t numOfIndexMiss; + uint64_t numOfIndexMiss; uint64_t numOfUserHit; uint64_t numOfUserMiss; uint64_t numOfClear; @@ -451,7 +451,7 @@ typedef struct SCtgCacheOperation { int32_t opId; void *data; bool syncOp; - tsem_t rspSem; + tsem_t rspSem; bool stopQueue; bool unLocked; } SCtgCacheOperation; @@ -466,7 +466,7 @@ typedef struct SCtgQueue { bool stopQueue; SCtgQNode *head; SCtgQNode *tail; - tsem_t reqSem; + tsem_t reqSem; uint64_t qRemainNum; } SCtgQueue; @@ -475,7 +475,7 @@ typedef struct SCatalogMgmt { int32_t jobPool; SRWLatch lock; SCtgQueue queue; - TdThread updateThread; + TdThread updateThread; SHashObj *pCluster; //key: clusterId, value: SCatalog* SCatalogStat stat; SCatalogCfg cfg; @@ -528,8 +528,8 @@ typedef struct SCtgOperation { #define CTG_META_SIZE(pMeta) (sizeof(STableMeta) + ((pMeta)->tableInfo.numOfTags + (pMeta)->tableInfo.numOfColumns) * sizeof(SSchema)) -#define CTG_TABLE_NOT_EXIST(code) (code == CTG_ERR_CODE_TABLE_NOT_EXIST) -#define CTG_DB_NOT_EXIST(code) (code == TSDB_CODE_MND_DB_NOT_EXIST) +#define CTG_TABLE_NOT_EXIST(code) (code == CTG_ERR_CODE_TABLE_NOT_EXIST) +#define CTG_DB_NOT_EXIST(code) (code == TSDB_CODE_MND_DB_NOT_EXIST) #define ctgFatal(param, ...) qFatal("CTG:%p " param, pCtg, __VA_ARGS__) #define ctgError(param, ...) qError("CTG:%p " param, pCtg, __VA_ARGS__) @@ -576,7 +576,7 @@ typedef struct SCtgOperation { } \ } while (0) - + #define CTG_ERR_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; return _code; } } while (0) #define CTG_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; } return _code; } while (0) #define CTG_ERR_JRET(c) do { code = c; if (code != TSDB_CODE_SUCCESS) { terrno = code; goto _return; } } while (0) diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index 933e65e582274711ad194d6a74ca5cbec682ef49..7b32eadcd415116f67db8526449c8a6759f45bcd 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -270,13 +270,22 @@ int32_t ctgUpdateTbMeta(SCatalog* pCtg, STableMetaRsp* rspMsg, bool syncOp) { int32_t code = 0; strcpy(output->dbFName, rspMsg->dbFName); - strcpy(output->tbName, rspMsg->tbName); output->dbId = rspMsg->dbId; - SET_META_TYPE_TABLE(output->metaType); + if (TSDB_CHILD_TABLE == rspMsg->tableType && NULL == rspMsg->pSchemas) { + strcpy(output->ctbName, rspMsg->tbName); - CTG_ERR_JRET(queryCreateTableMetaFromMsg(rspMsg, rspMsg->tableType == TSDB_SUPER_TABLE, &output->tbMeta)); + SET_META_TYPE_CTABLE(output->metaType); + + CTG_ERR_JRET(queryCreateCTableMetaFromMsg(rspMsg, &output->ctbMeta)); + } else { + strcpy(output->tbName, rspMsg->tbName); + + SET_META_TYPE_TABLE(output->metaType); + + CTG_ERR_JRET(queryCreateTableMetaFromMsg(rspMsg, rspMsg->tableType == TSDB_SUPER_TABLE, &output->tbMeta)); + } CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, output, syncOp)); @@ -893,7 +902,7 @@ int32_t catalogChkTbMetaVersion(SCatalog* pCtg, SRequestConnInfo *pConn, SArray* CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); } - SName name; + SName name = {0}; int32_t sver = 0; int32_t tver = 0; int32_t tbNum = taosArrayGetSize(pTables); diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index 64ca85edf45ac515bd7728883c171b04c399d148..585b33930c2cae0332ee77a3933d5a86288c77bc 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -39,7 +39,7 @@ int32_t ctgInitGetTbMetaTask(SCtgJob *pJob, int32_t taskIdx, void* param) { taosMemoryFree(task.taskCtx); CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } - + memcpy(ctx->pName, name, sizeof(*name)); ctx->flag = CTG_FLAG_UNKNOWN_STB; @@ -69,7 +69,7 @@ int32_t ctgInitGetTbMetasTask(SCtgJob *pJob, int32_t taskIdx, void* param) { taosArrayPush(pJob->pTasks, &task); - qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%d, tbNum:%d", + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%d, tbNum:%d", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), taosArrayGetSize(ctx->pNames), pJob->tbMetaNum); return TSDB_CODE_SUCCESS; @@ -89,7 +89,7 @@ int32_t ctgInitGetDbVgTask(SCtgJob *pJob, int32_t taskIdx, void* param) { } SCtgDbVgCtx* ctx = task.taskCtx; - + memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName)); taosArrayPush(pJob->pTasks, &task); @@ -113,7 +113,7 @@ int32_t ctgInitGetDbCfgTask(SCtgJob *pJob, int32_t taskIdx, void* param) { } SCtgDbCfgCtx* ctx = task.taskCtx; - + memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName)); taosArrayPush(pJob->pTasks, &task); @@ -137,7 +137,7 @@ int32_t ctgInitGetDbInfoTask(SCtgJob *pJob, int32_t taskIdx, void* param) { } SCtgDbInfoCtx* ctx = task.taskCtx; - + memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName)); taosArrayPush(pJob->pTasks, &task); @@ -167,7 +167,7 @@ int32_t ctgInitGetTbHashTask(SCtgJob *pJob, int32_t taskIdx, void* param) { taosMemoryFree(task.taskCtx); CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } - + memcpy(ctx->pName, name, sizeof(*name)); tNameGetFullDbName(ctx->pName, ctx->dbFName); @@ -197,7 +197,7 @@ int32_t ctgInitGetTbHashsTask(SCtgJob *pJob, int32_t taskIdx, void* param) { taosArrayPush(pJob->pTasks, &task); - qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%d, tbNum:%d", + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%d, tbNum:%d", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), taosArrayGetSize(ctx->pNames), pJob->tbHashNum); return TSDB_CODE_SUCCESS; @@ -248,7 +248,7 @@ int32_t ctgInitGetIndexTask(SCtgJob *pJob, int32_t taskIdx, void* param) { } SCtgIndexCtx* ctx = task.taskCtx; - + strcpy(ctx->indexFName, name); taosArrayPush(pJob->pTasks, &task); @@ -272,7 +272,7 @@ int32_t ctgInitGetUdfTask(SCtgJob *pJob, int32_t taskIdx, void* param) { } SCtgUdfCtx* ctx = task.taskCtx; - + strcpy(ctx->udfName, name); taosArrayPush(pJob->pTasks, &task); @@ -296,7 +296,7 @@ int32_t ctgInitGetUserTask(SCtgJob *pJob, int32_t taskIdx, void* param) { } SCtgUserCtx* ctx = task.taskCtx; - + memcpy(&ctx->user, user, sizeof(*user)); taosArrayPush(pJob->pTasks, &task); @@ -339,7 +339,7 @@ int32_t ctgInitGetTbIndexTask(SCtgJob *pJob, int32_t taskIdx, void* param) { taosMemoryFree(task.taskCtx); CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } - + memcpy(ctx->pName, name, sizeof(*name)); taosArrayPush(pJob->pTasks, &task); @@ -368,7 +368,7 @@ int32_t ctgInitGetTbCfgTask(SCtgJob *pJob, int32_t taskIdx, void* param) { taosMemoryFree(task.taskCtx); CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } - + memcpy(ctx->pName, name, sizeof(*name)); taosArrayPush(pJob->pTasks, &task); @@ -387,7 +387,7 @@ int32_t ctgHandleForceUpdate(SCatalog* pCtg, int32_t taskNum, SCtgJob *pJob, con taosHashCleanup(pTb); CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } - + for (int32_t i = 0; i < pJob->dbVgNum; ++i) { char* dbFName = taosArrayGet(pReq->pDbVgroup, i); taosHashPut(pDb, dbFName, strlen(dbFName), dbFName, TSDB_DB_FNAME_LEN); @@ -474,7 +474,7 @@ int32_t ctgInitTask(SCtgJob *pJob, CTG_TASK_TYPE type, void* param, int32_t *tas if (taskId) { *taskId = tid; } - + return TSDB_CODE_SUCCESS; } @@ -510,7 +510,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, const pJob->pCtg = pCtg; pJob->conn = *pConn; pJob->userParam = param; - + pJob->tbMetaNum = tbMetaNum; pJob->tbHashNum = tbHashNum; pJob->qnodeNum = qnodeNum; @@ -844,20 +844,20 @@ int32_t ctgDumpSvrVer(SCtgTask* pTask) { pJob->jobRes.pSvrVer->code = pTask->code; pJob->jobRes.pSvrVer->pRes = pTask->res; - + return TSDB_CODE_SUCCESS; } int32_t ctgCallSubCb(SCtgTask *pTask) { int32_t code = 0; - + CTG_LOCK(CTG_WRITE, &pTask->lock); - + int32_t parentNum = taosArrayGetSize(pTask->pParents); for (int32_t i = 0; i < parentNum; ++i) { SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); SCtgTask* pParent = taosArrayGetP(pTask->pParents, i); - + pParent->subRes.code = pTask->code; if (TSDB_CODE_SUCCESS == pTask->code) { code = (*gCtgAsyncFps[pTask->type].cloneFp)(pTask, &pParent->subRes.res); @@ -868,22 +868,22 @@ int32_t ctgCallSubCb(SCtgTask *pTask) { SCtgMsgCtx *pParMsgCtx = CTG_GET_TASK_MSGCTX(pParent, -1); - pParMsgCtx->pBatchs = pMsgCtx->pBatchs; + pParMsgCtx->pBatchs = pMsgCtx->pBatchs; CTG_ERR_JRET(pParent->subRes.fp(pParent)); } - + _return: CTG_UNLOCK(CTG_WRITE, &pTask->lock); - CTG_RET(code); + CTG_RET(code); } int32_t ctgCallUserCb(void* param) { SCtgJob* pJob = (SCtgJob*)param; qDebug("QID:0x%" PRIx64 " ctg start to call user cb with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode)); - + (*pJob->userFp)(&pJob->jobRes, pJob->userParam, pJob->jobResCode); qDebug("QID:0x%" PRIx64 " ctg end to call user cb", pJob->queryId); @@ -922,9 +922,9 @@ _return: //taosSsleep(2); //qDebug("QID:0x%" PRIx64 " ctg after sleep", pJob->queryId); - + taosAsyncExec(ctgCallUserCb, pJob, NULL); - + CTG_RET(code); } @@ -932,7 +932,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf int32_t code = 0; SCtgDBCache *dbCache = NULL; SCtgTask* pTask = tReq->pTask; - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx); SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx; @@ -958,38 +958,38 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf } case TDMT_MND_TABLE_META: { STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out; - + if (CTG_IS_META_NULL(pOut->metaType)) { if (CTG_FLAG_IS_STB(flag)) { char dbFName[TSDB_DB_FNAME_LEN] = {0}; tNameGetFullDbName(pName, dbFName); - + CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache)); if (NULL != dbCache) { SVgroupInfo vgInfo = {0}; CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo)); - + ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag); - *vgId = vgInfo.vgId; + *vgId = vgInfo.vgId; CTG_ERR_JRET(ctgGetTbMetaFromVnode(pCtg, pConn, pName, &vgInfo, NULL, tReq)); ctgReleaseVgInfoToCache(pCtg, dbCache); } else { SBuildUseDBInput input = {0}; - + tstrncpy(input.db, dbFName, tListLen(input.db)); input.vgVersion = CTG_DEFAULT_INVALID_VERSION; - + CTG_ERR_JRET(ctgGetDBVgInfoFromMnode(pCtg, pConn, &input, NULL, tReq)); } return TSDB_CODE_SUCCESS; } - + ctgError("no tbmeta got, tbName:%s", tNameGetTableName(pName)); ctgRemoveTbMetaFromCache(pCtg, pName, false); - + CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST); } @@ -998,12 +998,12 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf STableMetaOutput* pLastOut = (STableMetaOutput*)pMsgCtx->out; TSWAP(pLastOut->tbMeta, pOut->tbMeta); } - + break; } case TDMT_VND_TABLE_META: { STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out; - + if (CTG_IS_META_NULL(pOut->metaType)) { ctgError("no tbmeta got, tbNmae:%s", tNameGetTableName(pName)); ctgRemoveTbMetaFromCache(pCtg, pName, false); @@ -1013,12 +1013,12 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf if (CTG_FLAG_IS_STB(flag)) { break; } - + if (CTG_IS_META_TABLE(pOut->metaType) && TSDB_SUPER_TABLE == pOut->tbMeta->tableType) { ctgDebug("will continue to refresh tbmeta since got stb, tbName:%s", tNameGetTableName(pName)); - + taosMemoryFreeClear(pOut->tbMeta); - + CTG_RET(ctgGetTbMetaFromMnode(pCtg, pConn, pName, NULL, tReq)); } else if (CTG_IS_META_BOTH(pOut->metaType)) { int32_t exist = 0; @@ -1029,13 +1029,13 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf stbCtx.flag = flag; stbCtx.pName = &stbName; - taosMemoryFreeClear(pOut->tbMeta); + taosMemoryFreeClear(pOut->tbMeta); CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta)); if (pOut->tbMeta) { exist = 1; } } - + if (0 == exist) { TSWAP(pMsgCtx->lastOut, pMsgCtx->out); CTG_RET(ctgGetTbMetaFromMnodeImpl(pCtg, pConn, pOut->dbFName, pOut->tbName, NULL, tReq)); @@ -1056,7 +1056,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf if (CTG_IS_META_BOTH(pOut->metaType)) { memcpy(pOut->tbMeta, &pOut->ctbMeta, sizeof(pOut->ctbMeta)); } - + /* else if (CTG_IS_META_CTABLE(pOut->metaType)) { SName stbName = *pName; @@ -1064,7 +1064,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf SCtgTbMetaCtx stbCtx = {0}; stbCtx.flag = flag; stbCtx.pName = &stbName; - + CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta)); if (NULL == pOut->tbMeta) { ctgDebug("stb no longer exist, stbName:%s", stbName.tname); @@ -1088,7 +1088,7 @@ _return: if (pTask->res || code) { ctgHandleTaskEnd(pTask, code); } - + CTG_RET(code); } @@ -1097,7 +1097,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu int32_t code = 0; SCtgDBCache *dbCache = NULL; SCtgTask* pTask = tReq->pTask; - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx); SCtgTbMetasCtx* ctx = (SCtgTbMetasCtx*)pTask->taskCtx; @@ -1125,38 +1125,38 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu } case TDMT_MND_TABLE_META: { STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out; - + if (CTG_IS_META_NULL(pOut->metaType)) { if (CTG_FLAG_IS_STB(flag)) { char dbFName[TSDB_DB_FNAME_LEN] = {0}; tNameGetFullDbName(pName, dbFName); - + CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache)); if (NULL != dbCache) { SVgroupInfo vgInfo = {0}; CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo)); - + ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag); - *vgId = vgInfo.vgId; + *vgId = vgInfo.vgId; CTG_ERR_JRET(ctgGetTbMetaFromVnode(pCtg, pConn, pName, &vgInfo, NULL, tReq)); ctgReleaseVgInfoToCache(pCtg, dbCache); } else { SBuildUseDBInput input = {0}; - + tstrncpy(input.db, dbFName, tListLen(input.db)); input.vgVersion = CTG_DEFAULT_INVALID_VERSION; - + CTG_ERR_JRET(ctgGetDBVgInfoFromMnode(pCtg, pConn, &input, NULL, tReq)); } return TSDB_CODE_SUCCESS; } - + ctgError("no tbmeta got, tbName:%s", tNameGetTableName(pName)); ctgRemoveTbMetaFromCache(pCtg, pName, false); - + CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST); } @@ -1165,12 +1165,12 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu STableMetaOutput* pLastOut = (STableMetaOutput*)pMsgCtx->out; TSWAP(pLastOut->tbMeta, pOut->tbMeta); } - + break; } case TDMT_VND_TABLE_META: { STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out; - + if (CTG_IS_META_NULL(pOut->metaType)) { ctgError("no tbmeta got, tbNmae:%s", tNameGetTableName(pName)); ctgRemoveTbMetaFromCache(pCtg, pName, false); @@ -1180,12 +1180,12 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu if (CTG_FLAG_IS_STB(flag)) { break; } - + if (CTG_IS_META_TABLE(pOut->metaType) && TSDB_SUPER_TABLE == pOut->tbMeta->tableType) { ctgDebug("will continue to refresh tbmeta since got stb, tbName:%s", tNameGetTableName(pName)); - + taosMemoryFreeClear(pOut->tbMeta); - + CTG_RET(ctgGetTbMetaFromMnode(pCtg, pConn, pName, NULL, tReq)); } else if (CTG_IS_META_BOTH(pOut->metaType)) { int32_t exist = 0; @@ -1196,14 +1196,14 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu stbCtx.flag = flag; stbCtx.pName = &stbName; - taosMemoryFreeClear(pOut->tbMeta); + taosMemoryFreeClear(pOut->tbMeta); CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta)); if (pOut->tbMeta) { ctgDebug("use cached stb meta, tbName:%s", tNameGetTableName(pName)); exist = 1; } } - + if (0 == exist) { TSWAP(pMsgCtx->lastOut, pMsgCtx->out); CTG_RET(ctgGetTbMetaFromMnodeImpl(pCtg, pConn, pOut->dbFName, pOut->tbName, NULL, tReq)); @@ -1224,7 +1224,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu if (CTG_IS_META_BOTH(pOut->metaType)) { memcpy(pOut->tbMeta, &pOut->ctbMeta, sizeof(pOut->ctbMeta)); } - + /* else if (CTG_IS_META_CTABLE(pOut->metaType)) { SName stbName = *pName; @@ -1232,7 +1232,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu SCtgTbMetaCtx stbCtx = {0}; stbCtx.flag = flag; stbCtx.pName = &stbName; - + CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta)); if (NULL == pOut->tbMeta) { ctgDebug("stb no longer exist, stbName:%s", stbName.tname); @@ -1273,7 +1273,7 @@ _return: if (pTask->res && taskDone) { ctgHandleTaskEnd(pTask, code); } - + CTG_RET(code); } @@ -1282,7 +1282,7 @@ int32_t ctgHandleGetDbVgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf * int32_t code = 0; SCtgTask* pTask = tReq->pTask; SCtgDbVgCtx* ctx = (SCtgDbVgCtx*)pTask->taskCtx; - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); @@ -1290,7 +1290,7 @@ int32_t ctgHandleGetDbVgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf * case TDMT_MND_USE_DB: { SUseDbOutput* pOut = (SUseDbOutput*)pTask->msgCtx.out; SDBVgInfo* pDb = NULL; - + CTG_ERR_JRET(ctgGenerateVgList(pCtg, pOut->dbVgroup->vgHash, (SArray**)&pTask->res)); CTG_ERR_JRET(cloneDbVgInfo(pOut->dbVgroup, &pDb)); @@ -1316,7 +1316,7 @@ int32_t ctgHandleGetTbHashRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf int32_t code = 0; SCtgTask* pTask = tReq->pTask; SCtgTbHashCtx* ctx = (SCtgTbHashCtx*)pTask->taskCtx; - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); @@ -1330,7 +1330,7 @@ int32_t ctgHandleGetTbHashRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf } CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, ctx->pName, (SVgroupInfo*)pTask->res)); - + CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false)); pOut->dbVgroup = NULL; @@ -1354,7 +1354,7 @@ int32_t ctgHandleGetTbHashsRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu int32_t code = 0; SCtgTask* pTask = tReq->pTask; SCtgTbHashsCtx* ctx = (SCtgTbHashsCtx*)pTask->taskCtx; - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx); SCtgFetch* pFetch = taosArrayGet(ctx->pFetchs, tReq->msgIdx); bool taskDone = false; @@ -1367,7 +1367,7 @@ int32_t ctgHandleGetTbHashsRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu STablesReq* pReq = taosArrayGet(ctx->pNames, pFetch->dbIdx); CTG_ERR_JRET(ctgGetVgInfosFromHashValue(pCtg, tReq, pOut->dbVgroup, ctx, pMsgCtx->target, pReq->pTables, true)); - + CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, pMsgCtx->target, pOut->dbId, pOut->dbVgroup, false)); pOut->dbVgroup = NULL; @@ -1394,7 +1394,7 @@ _return: pRes->code = code; pRes->pRes = NULL; } - + if (0 == atomic_sub_fetch_32(&ctx->fetchNum, 1)) { TSWAP(pTask->res, ctx->pResList); taskDone = true; @@ -1419,9 +1419,9 @@ int32_t ctgHandleGetTbIndexRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu CTG_ERR_JRET(ctgCloneTableIndex(pOut->pIndex, &pInfo)); pTask->res = pInfo; - SCtgTbIndexCtx* ctx = pTask->taskCtx; + SCtgTbIndexCtx* ctx = pTask->taskCtx; CTG_ERR_JRET(ctgUpdateTbIndexEnqueue(pTask->pJob->pCtg, (STableIndex**)&pTask->msgCtx.out, false)); - + _return: if (TSDB_CODE_MND_DB_INDEX_NOT_EXIST == code) { @@ -1438,7 +1438,7 @@ int32_t ctgHandleGetTbCfgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf CTG_ERR_JRET(ctgProcessRspMsg(&pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); TSWAP(pTask->res, pTask->msgCtx.out); - + _return: ctgHandleTaskEnd(pTask, code); @@ -1452,7 +1452,7 @@ int32_t ctgHandleGetDbCfgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); TSWAP(pTask->res, pTask->msgCtx.out); - + _return: ctgHandleTaskEnd(pTask, code); @@ -1471,7 +1471,7 @@ int32_t ctgHandleGetQnodeRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); TSWAP(pTask->res, pTask->msgCtx.out); - + _return: ctgHandleTaskEnd(pTask, code); @@ -1485,7 +1485,7 @@ int32_t ctgHandleGetDnodeRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf CTG_ERR_JRET(ctgProcessRspMsg(&pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); TSWAP(pTask->res, pTask->msgCtx.out); - + _return: ctgHandleTaskEnd(pTask, code); @@ -1499,7 +1499,7 @@ int32_t ctgHandleGetIndexRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); TSWAP(pTask->res, pTask->msgCtx.out); - + _return: ctgHandleTaskEnd(pTask, code); @@ -1513,7 +1513,7 @@ int32_t ctgHandleGetUdfRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *p CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); TSWAP(pTask->res, pTask->msgCtx.out); - + _return: ctgHandleTaskEnd(pTask, code); @@ -1525,7 +1525,7 @@ int32_t ctgHandleGetUserRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf * int32_t code = 0; SCtgTask* pTask = tReq->pTask; SCtgUserCtx* ctx = (SCtgUserCtx*)pTask->taskCtx; - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; bool pass = false; SGetUserAuthRsp* pOut = (SGetUserAuthRsp*)pTask->msgCtx.out; @@ -1573,7 +1573,7 @@ int32_t ctgHandleGetSvrVerRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf CTG_ERR_JRET(ctgProcessRspMsg(&pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); TSWAP(pTask->res, pTask->msgCtx.out); - + _return: ctgHandleTaskEnd(pTask, code); @@ -1583,7 +1583,7 @@ _return: int32_t ctgAsyncRefreshTbMeta(SCtgTaskReq *tReq, int32_t flag, SName* pName, int32_t* vgId) { SCtgTask* pTask = tReq->pTask; - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; int32_t code = 0; @@ -1603,7 +1603,7 @@ int32_t ctgAsyncRefreshTbMeta(SCtgTaskReq *tReq, int32_t flag, SName* pName, int SCtgDBCache *dbCache = NULL; char dbFName[TSDB_DB_FNAME_LEN] = {0}; tNameGetFullDbName(pName, dbFName); - + CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache)); if (dbCache) { SVgroupInfo vgInfo = {0}; @@ -1632,7 +1632,7 @@ _return: } int32_t ctgLaunchGetTbMetaTask(SCtgTask *pTask) { - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgJob* pJob = pTask->pJob; SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); @@ -1649,14 +1649,14 @@ int32_t ctgLaunchGetTbMetaTask(SCtgTask *pTask) { SCtgTbMetaCtx* pCtx = (SCtgTbMetaCtx*)pTask->taskCtx; SCtgTaskReq tReq; tReq.pTask = pTask; - tReq.msgIdx = -1; + tReq.msgIdx = -1; CTG_ERR_RET(ctgAsyncRefreshTbMeta(&tReq, pCtx->flag, pCtx->pName, &pCtx->vgId)); return TSDB_CODE_SUCCESS; } int32_t ctgLaunchGetTbMetasTask(SCtgTask *pTask) { - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgTbMetasCtx* pCtx = (SCtgTbMetasCtx*)pTask->taskCtx; SCtgJob* pJob = pTask->pJob; @@ -1670,18 +1670,18 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask *pTask) { CTG_ERR_RET(ctgGetTbMetasFromCache(pCtg, pConn, pCtx, i, &fetchIdx, baseResIdx, pReq->pTables)); baseResIdx += taosArrayGetSize(pReq->pTables); } - + pCtx->fetchNum = taosArrayGetSize(pCtx->pFetchs); if (pCtx->fetchNum <= 0) { TSWAP(pTask->res, pCtx->pResList); - + CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0)); return TSDB_CODE_SUCCESS; } - + pTask->msgCtxs = taosArrayInit(pCtx->fetchNum, sizeof(SCtgMsgCtx)); taosArraySetSize(pTask->msgCtxs, pCtx->fetchNum); - + for (int32_t i = 0; i < pCtx->fetchNum; ++i) { SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i); SName* pName = ctgGetFetchName(pCtx->pNames, pFetch); @@ -1689,19 +1689,19 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask *pTask) { if (NULL == pMsgCtx->pBatchs) { pMsgCtx->pBatchs = pJob->pBatchs; } - + SCtgTaskReq tReq; tReq.pTask = pTask; - tReq.msgIdx = pFetch->fetchIdx; + tReq.msgIdx = pFetch->fetchIdx; CTG_ERR_RET(ctgAsyncRefreshTbMeta(&tReq, pFetch->flag, pName, &pFetch->vgId)); } - + return TSDB_CODE_SUCCESS; } int32_t ctgLaunchGetDbVgTask(SCtgTask *pTask) { int32_t code = 0; - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgDBCache *dbCache = NULL; SCtgDbVgCtx* pCtx = (SCtgDbVgCtx*)pTask->taskCtx; @@ -1710,18 +1710,18 @@ int32_t ctgLaunchGetDbVgTask(SCtgTask *pTask) { if (NULL == pMsgCtx->pBatchs) { pMsgCtx->pBatchs = pJob->pBatchs; } - + CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache)); if (NULL != dbCache) { CTG_ERR_JRET(ctgGenerateVgList(pCtg, dbCache->vgCache.vgInfo->vgHash, (SArray**)&pTask->res)); ctgReleaseVgInfoToCache(pCtg, dbCache); dbCache = NULL; - + CTG_ERR_JRET(ctgHandleTaskEnd(pTask, 0)); } else { SBuildUseDBInput input = {0}; - + tstrncpy(input.db, pCtx->dbFName, tListLen(input.db)); input.vgVersion = CTG_DEFAULT_INVALID_VERSION; @@ -1742,7 +1742,7 @@ _return: int32_t ctgLaunchGetTbHashTask(SCtgTask *pTask) { int32_t code = 0; - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgDBCache *dbCache = NULL; SCtgTbHashCtx* pCtx = (SCtgTbHashCtx*)pTask->taskCtx; @@ -1751,7 +1751,7 @@ int32_t ctgLaunchGetTbHashTask(SCtgTask *pTask) { if (NULL == pMsgCtx->pBatchs) { pMsgCtx->pBatchs = pJob->pBatchs; } - + CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache)); if (NULL != dbCache) { pTask->res = taosMemoryMalloc(sizeof(SVgroupInfo)); @@ -1762,17 +1762,17 @@ int32_t ctgLaunchGetTbHashTask(SCtgTask *pTask) { ctgReleaseVgInfoToCache(pCtg, dbCache); dbCache = NULL; - + CTG_ERR_JRET(ctgHandleTaskEnd(pTask, 0)); } else { SBuildUseDBInput input = {0}; - + tstrncpy(input.db, pCtx->dbFName, tListLen(input.db)); input.vgVersion = CTG_DEFAULT_INVALID_VERSION; SCtgTaskReq tReq; tReq.pTask = pTask; - tReq.msgIdx = -1; + tReq.msgIdx = -1; CTG_ERR_RET(ctgGetDBVgInfoFromMnode(pCtg, pConn, &input, NULL, &tReq)); } @@ -1786,16 +1786,16 @@ _return: } int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) { - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgTbHashsCtx* pCtx = (SCtgTbHashsCtx*)pTask->taskCtx; SCtgDBCache *dbCache = NULL; - SCtgJob* pJob = pTask->pJob; + SCtgJob* pJob = pTask->pJob; int32_t dbNum = taosArrayGetSize(pCtx->pNames); int32_t fetchIdx = 0; int32_t baseResIdx = 0; int32_t code = 0; - + for (int32_t i = 0; i < dbNum; ++i) { STablesReq* pReq = taosArrayGet(pCtx->pNames, i); @@ -1804,7 +1804,7 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) { if (NULL != dbCache) { SCtgTaskReq tReq; tReq.pTask = pTask; - tReq.msgIdx = -1; + tReq.msgIdx = -1; CTG_ERR_JRET(ctgGetVgInfosFromHashValue(pCtg, &tReq, dbCache->vgCache.vgInfo, pCtx, pReq->dbFName, pReq->pTables, false)); ctgReleaseVgInfoToCache(pCtg, dbCache); @@ -1815,21 +1815,21 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) { ctgAddFetch(&pCtx->pFetchs, i, -1, &fetchIdx, baseResIdx, 0); baseResIdx += taosArrayGetSize(pReq->pTables); - taosArraySetSize(pCtx->pResList, baseResIdx); + taosArraySetSize(pCtx->pResList, baseResIdx); } } pCtx->fetchNum = taosArrayGetSize(pCtx->pFetchs); if (pCtx->fetchNum <= 0) { TSWAP(pTask->res, pCtx->pResList); - + CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0)); return TSDB_CODE_SUCCESS; } - + pTask->msgCtxs = taosArrayInit(pCtx->fetchNum, sizeof(SCtgMsgCtx)); taosArraySetSize(pTask->msgCtxs, pCtx->fetchNum); - + for (int32_t i = 0; i < pCtx->fetchNum; ++i) { SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i); STablesReq* pReq = taosArrayGet(pCtx->pNames, pFetch->dbIdx); @@ -1837,10 +1837,10 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) { if (NULL == pMsgCtx->pBatchs) { pMsgCtx->pBatchs = pJob->pBatchs; } - + SBuildUseDBInput input = {0}; strcpy(input.db, pReq->dbFName); - + input.vgVersion = CTG_DEFAULT_INVALID_VERSION; SCtgTaskReq tReq; @@ -1854,14 +1854,14 @@ _return: if (dbCache) { ctgReleaseVgInfoToCache(pCtg, dbCache); } - + return code; } int32_t ctgLaunchGetTbIndexTask(SCtgTask *pTask) { int32_t code = 0; - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgTbIndexCtx* pCtx = (SCtgTbIndexCtx*)pTask->taskCtx; SArray* pRes = NULL; @@ -1874,18 +1874,18 @@ int32_t ctgLaunchGetTbIndexTask(SCtgTask *pTask) { CTG_ERR_RET(ctgReadTbIndexFromCache(pCtg, pCtx->pName, &pRes)); if (pRes) { pTask->res = pRes; - + CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0)); return TSDB_CODE_SUCCESS; } - + CTG_ERR_RET(ctgGetTbIndexFromMnode(pCtg, pConn, pCtx->pName, NULL, pTask)); return TSDB_CODE_SUCCESS; } int32_t ctgLaunchGetTbCfgTask(SCtgTask *pTask) { int32_t code = 0; - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgTbCfgCtx* pCtx = (SCtgTbCfgCtx*)pTask->taskCtx; SArray* pRes = NULL; @@ -1915,7 +1915,7 @@ int32_t ctgLaunchGetTbCfgTask(SCtgTask *pTask) { return TSDB_CODE_SUCCESS; } } - + CTG_ERR_JRET(ctgGetTableCfgFromVnode(pCtg, pConn, pCtx->pName, pCtx->pVgInfo, NULL, pTask)); } @@ -1926,13 +1926,13 @@ _return: if (CTG_TASK_LAUNCHED == pTask->status) { ctgHandleTaskEnd(pTask, code); } - + CTG_RET(code); } int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) { - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgJob* pJob = pTask->pJob; SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); @@ -1945,7 +1945,7 @@ int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) { } int32_t ctgLaunchGetDnodeTask(SCtgTask *pTask) { - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgJob* pJob = pTask->pJob; SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); @@ -1959,7 +1959,7 @@ int32_t ctgLaunchGetDnodeTask(SCtgTask *pTask) { int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) { - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgDbCfgCtx* pCtx = (SCtgDbCfgCtx*)pTask->taskCtx; SCtgJob* pJob = pTask->pJob; @@ -1975,7 +1975,7 @@ int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) { int32_t ctgLaunchGetDbInfoTask(SCtgTask *pTask) { int32_t code = 0; - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SCtgDBCache *dbCache = NULL; SCtgDbInfoCtx* pCtx = (SCtgDbInfoCtx*)pTask->taskCtx; SCtgJob* pJob = pTask->pJob; @@ -2014,7 +2014,7 @@ _return: } int32_t ctgLaunchGetIndexTask(SCtgTask *pTask) { - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgIndexCtx* pCtx = (SCtgIndexCtx*)pTask->taskCtx; SCtgJob* pJob = pTask->pJob; @@ -2029,7 +2029,7 @@ int32_t ctgLaunchGetIndexTask(SCtgTask *pTask) { } int32_t ctgLaunchGetUdfTask(SCtgTask *pTask) { - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgUdfCtx* pCtx = (SCtgUdfCtx*)pTask->taskCtx; SCtgJob* pJob = pTask->pJob; @@ -2044,7 +2044,7 @@ int32_t ctgLaunchGetUdfTask(SCtgTask *pTask) { } int32_t ctgLaunchGetUserTask(SCtgTask *pTask) { - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgUserCtx* pCtx = (SCtgUserCtx*)pTask->taskCtx; bool inCache = false; @@ -2054,7 +2054,7 @@ int32_t ctgLaunchGetUserTask(SCtgTask *pTask) { if (NULL == pMsgCtx->pBatchs) { pMsgCtx->pBatchs = pJob->pBatchs; } - + CTG_ERR_RET(ctgChkAuthFromCache(pCtg, pCtx->user.user, pCtx->user.dbFName, pCtx->user.type, &inCache, &pass)); if (inCache) { pTask->res = taosMemoryCalloc(1, sizeof(bool)); @@ -2062,7 +2062,7 @@ int32_t ctgLaunchGetUserTask(SCtgTask *pTask) { CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } *(bool*)pTask->res = pass; - + CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0)); return TSDB_CODE_SUCCESS; } @@ -2073,7 +2073,7 @@ int32_t ctgLaunchGetUserTask(SCtgTask *pTask) { } int32_t ctgLaunchGetSvrVerTask(SCtgTask *pTask) { - SCatalog* pCtg = pTask->pJob->pCtg; + SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgJob* pJob = pTask->pJob; SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); @@ -2096,7 +2096,7 @@ int32_t ctgRelaunchGetTbMetaTask(SCtgTask *pTask) { int32_t ctgGetTbCfgCb(SCtgTask *pTask) { int32_t code = 0; - + CTG_ERR_JRET(pTask->subRes.code); SCtgTbCfgCtx* pCtx = (SCtgTbCfgCtx*)pTask->taskCtx; @@ -2104,7 +2104,7 @@ int32_t ctgGetTbCfgCb(SCtgTask *pTask) { pCtx->tbType = ((STableMeta*)pTask->subRes.res)->tableType; } else if (CTG_TASK_GET_DB_VGROUP == pTask->subRes.type) { SDBVgInfo* pDb = (SDBVgInfo*)pTask->subRes.res; - + pCtx->pVgInfo = taosMemoryCalloc(1, sizeof(SVgroupInfo)); CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, pDb, pCtx->pName, pCtx->pVgInfo)); } @@ -2167,7 +2167,7 @@ SCtgAsyncFps gCtgAsyncFps[] = { int32_t ctgMakeAsyncRes(SCtgJob *pJob) { int32_t code = 0; int32_t taskNum = taosArrayGetSize(pJob->pTasks); - + for (int32_t i = 0; i < taskNum; ++i) { SCtgTask *pTask = taosArrayGet(pJob->pTasks, i); CTG_ERR_RET((*gCtgAsyncFps[pTask->type].dumpResFp)(pTask)); @@ -2180,16 +2180,16 @@ int32_t ctgSearchExistingTask(SCtgJob *pJob, CTG_TASK_TYPE type, void* param, in bool equal = false; SCtgTask* pTask = NULL; int32_t code = 0; - + CTG_LOCK(CTG_READ, &pJob->taskLock); - + int32_t taskNum = taosArrayGetSize(pJob->pTasks); for (int32_t i = 0; i < taskNum; ++i) { pTask = taosArrayGet(pJob->pTasks, i); if (type != pTask->type) { continue; } - + CTG_ERR_JRET((*gCtgAsyncFps[type].compFp)(pTask, param, &equal)); if (equal) { break; @@ -2208,7 +2208,7 @@ _return: int32_t ctgSetSubTaskCb(SCtgTask *pSub, SCtgTask *pTask) { int32_t code = 0; - + CTG_LOCK(CTG_WRITE, &pSub->lock); if (CTG_TASK_DONE == pSub->status) { pTask->subRes.code = pSub->code; @@ -2216,7 +2216,7 @@ int32_t ctgSetSubTaskCb(SCtgTask *pSub, SCtgTask *pTask) { SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); SCtgMsgCtx *pSubMsgCtx = CTG_GET_TASK_MSGCTX(pSub, -1); pMsgCtx->pBatchs = pSubMsgCtx->pBatchs; - + CTG_ERR_JRET(pTask->subRes.fp(pTask)); } else { if (NULL == pSub->pParents) { @@ -2230,7 +2230,7 @@ _return: CTG_UNLOCK(CTG_WRITE, &pSub->lock); - CTG_RET(code); + CTG_RET(code); } @@ -2242,13 +2242,13 @@ int32_t ctgLaunchSubTask(SCtgTask *pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp, ctgClearSubTaskRes(&pTask->subRes); pTask->subRes.type = type; pTask->subRes.fp = fp; - + CTG_ERR_RET(ctgSearchExistingTask(pJob, type, param, &subTaskId)); if (subTaskId < 0) { CTG_ERR_RET(ctgInitTask(pJob, type, param, &subTaskId)); newTask = true; } - + SCtgTask* pSub = taosArrayGet(pJob->pTasks, subTaskId); CTG_ERR_RET(ctgSetSubTaskCb(pSub, pTask)); @@ -2267,21 +2267,21 @@ int32_t ctgLaunchSubTask(SCtgTask *pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp, int32_t ctgLaunchJob(SCtgJob *pJob) { int32_t taskNum = taosArrayGetSize(pJob->pTasks); - + for (int32_t i = 0; i < taskNum; ++i) { SCtgTask *pTask = taosArrayGet(pJob->pTasks, i); qDebug("QID:0x%" PRIx64 " ctg launch [%dth] task", pJob->queryId, pTask->taskId); CTG_ERR_RET((*gCtgAsyncFps[pTask->type].launchFp)(pTask)); - + pTask->status = CTG_TASK_LAUNCHED; } if (taskNum <= 0) { qDebug("QID:0x%" PRIx64 " ctg call user callback with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode)); - + taosAsyncExec(ctgCallUserCb, pJob, NULL); -#if CTG_BATCH_FETCH +#if CTG_BATCH_FETCH } else { ctgLaunchBatchs(pJob->pCtg, pJob, pJob->pBatchs); #endif diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h index 53d118e1adc74f4c532d9246b9cb8c4eba2910b6..706985f89468455b49876ac8d20edef634c42ff9 100644 --- a/source/libs/command/inc/commandInt.h +++ b/source/libs/command/inc/commandInt.h @@ -100,7 +100,6 @@ extern "C" { typedef struct SExplainGroup { int32_t nodeNum; int32_t physiPlanExecNum; - int32_t physiPlanNum; int32_t physiPlanExecIdx; SRWLatch lock; SSubplan *plan; diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index 1b2489acd62bec88eac5bd5aca54a6d2f00ff1ab..18d839e1091e3fc5f1be2939a22345efe8ea8579 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -17,6 +17,7 @@ #include "catalog.h" #include "commandInt.h" #include "scheduler.h" +#include "systable.h" #include "tdatablock.h" #include "tglobal.h" #include "tgrant.h" @@ -75,46 +76,41 @@ static SSDataBlock* buildDescResultDataBlock() { return pBlock; } -static void setDescResultIntoDataBlock(SSDataBlock* pBlock, int32_t numOfRows, STableMeta* pMeta) { +static void setDescResultIntoDataBlock(bool sysInfoUser, SSDataBlock* pBlock, int32_t numOfRows, STableMeta* pMeta) { blockDataEnsureCapacity(pBlock, numOfRows); - pBlock->info.rows = numOfRows; + pBlock->info.rows = 0; // field SColumnInfoData* pCol1 = taosArrayGet(pBlock->pDataBlock, 0); - char buf[DESCRIBE_RESULT_FIELD_LEN] = {0}; - for (int32_t i = 0; i < numOfRows; ++i) { - STR_TO_VARSTR(buf, pMeta->schema[i].name); - colDataAppend(pCol1, i, buf, false); - } - // Type SColumnInfoData* pCol2 = taosArrayGet(pBlock->pDataBlock, 1); - for (int32_t i = 0; i < numOfRows; ++i) { - STR_TO_VARSTR(buf, tDataTypes[pMeta->schema[i].type].name); - colDataAppend(pCol2, i, buf, false); - } - // Length SColumnInfoData* pCol3 = taosArrayGet(pBlock->pDataBlock, 2); - for (int32_t i = 0; i < numOfRows; ++i) { - int32_t bytes = getSchemaBytes(pMeta->schema + i); - colDataAppend(pCol3, i, (const char*)&bytes, false); - } - // Note SColumnInfoData* pCol4 = taosArrayGet(pBlock->pDataBlock, 3); + char buf[DESCRIBE_RESULT_FIELD_LEN] = {0}; for (int32_t i = 0; i < numOfRows; ++i) { + if (invisibleColumn(sysInfoUser, pMeta->tableType, pMeta->schema[i].flags)) { + continue; + } + STR_TO_VARSTR(buf, pMeta->schema[i].name); + colDataAppend(pCol1, pBlock->info.rows, buf, false); + STR_TO_VARSTR(buf, tDataTypes[pMeta->schema[i].type].name); + colDataAppend(pCol2, pBlock->info.rows, buf, false); + int32_t bytes = getSchemaBytes(pMeta->schema + i); + colDataAppend(pCol3, pBlock->info.rows, (const char*)&bytes, false); STR_TO_VARSTR(buf, i >= pMeta->tableInfo.numOfColumns ? "TAG" : ""); - colDataAppend(pCol4, i, buf, false); + colDataAppend(pCol4, pBlock->info.rows, buf, false); + ++(pBlock->info.rows); } } -static int32_t execDescribe(SNode* pStmt, SRetrieveTableRsp** pRsp) { +static int32_t execDescribe(bool sysInfoUser, SNode* pStmt, SRetrieveTableRsp** pRsp) { SDescribeStmt* pDesc = (SDescribeStmt*)pStmt; int32_t numOfRows = TABLE_TOTAL_COL_NUM(pDesc->pMeta); SSDataBlock* pBlock = buildDescResultDataBlock(); - setDescResultIntoDataBlock(pBlock, numOfRows, pDesc->pMeta); + setDescResultIntoDataBlock(sysInfoUser, pBlock, numOfRows, pDesc->pMeta); return buildRetrieveTableRsp(pBlock, DESCRIBE_RESULT_COLS, pRsp); } @@ -475,6 +471,7 @@ static int32_t setCreateTBResultIntoDataBlock(SSDataBlock* pBlock, SDbCfgInfo* p len += sprintf(buf2 + VARSTR_HEADER_SIZE, "CREATE TABLE `%s` (", tbName); appendColumnFields(buf2, &len, pCfg); len += sprintf(buf2 + VARSTR_HEADER_SIZE + len, ")"); + appendTableOptions(buf2, &len, pDbCfg, pCfg); } varDataLen(buf2) = len; @@ -665,10 +662,10 @@ static int32_t execSelectWithoutFrom(SSelectStmt* pSelect, SRetrieveTableRsp** p return code; } -int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp) { +int32_t qExecCommand(bool sysInfoUser, SNode* pStmt, SRetrieveTableRsp** pRsp) { switch (nodeType(pStmt)) { case QUERY_NODE_DESCRIBE_STMT: - return execDescribe(pStmt, pRsp); + return execDescribe(sysInfoUser, pStmt, pRsp); case QUERY_NODE_RESET_QUERY_CACHE_STMT: return execResetQueryCache(); case QUERY_NODE_SHOW_CREATE_DATABASE_STMT: diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 9da916855583f189b475d07a824efa7baf3f23de..967c682b0bb701502dff90081fba8973a34bd22a 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -296,8 +296,6 @@ int32_t qExplainGenerateResNode(SPhysiNode *pNode, SExplainGroup *group, SExplai QRY_ERR_JRET(qExplainGenerateResChildren(pNode, group, &resNode->pChildren)); - ++group->physiPlanNum; - *pResNode = resNode; return TSDB_CODE_SUCCESS; @@ -1548,12 +1546,6 @@ int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level) { QRY_ERR_RET(qExplainGenerateResNode(group->plan->pNode, group, &node)); - if ((EXPLAIN_MODE_ANALYZE == ctx->mode) && (group->physiPlanNum != group->physiPlanExecNum)) { - qError("physiPlanNum %d mismatch with physiExecNum %d in group %d", group->physiPlanNum, group->physiPlanExecNum, - groupId); - QRY_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - QRY_ERR_JRET(qExplainResNodeToRows(node, ctx, level)); _return: @@ -1578,12 +1570,9 @@ int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) { SColumnInfoData *pInfoData = taosArrayGet(pBlock->pDataBlock, 0); - char buf[1024] = {0}; for (int32_t i = 0; i < rowNum; ++i) { SQueryExplainRowInfo *row = taosArrayGet(pCtx->rows, i); - varDataCopy(buf, row->buf); - ASSERT(varDataTLen(row->buf) == row->len); - colDataAppend(pInfoData, i, buf, false); + colDataAppend(pInfoData, i, row->buf, false); } pBlock->info.rows = rowNum; diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index 52c73f85f55703607b1da579860574af60dca1ef..9e7fcc222788e16f60252727ba7ca7c911366e9b 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -22,6 +22,13 @@ #include "tbuffer.h" #include "tcommon.h" #include "tpagedbuf.h" +#include "tsimplehash.h" + +#define T_LONG_JMP(_obj, _c) \ + do { \ + ASSERT((_c) != -1); \ + longjmp((_obj), (_c)); \ + } while (0); #define SET_RES_WINDOW_KEY(_k, _ori, _len, _uid) \ do { \ @@ -80,11 +87,9 @@ struct SqlFunctionCtx; size_t getResultRowSize(struct SqlFunctionCtx* pCtx, int32_t numOfOutput); void initResultRowInfo(SResultRowInfo* pResultRowInfo); -void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo); void initResultRow(SResultRow* pResultRow); void closeResultRow(SResultRow* pResultRow); -bool isResultRowClosed(SResultRow* pResultRow); struct SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t index, const int32_t* offset); @@ -102,7 +107,7 @@ static FORCE_INLINE void setResultBufPageDirty(SDiskbasedBuf* pBuf, SResultRowPo setBufPageDirty(pPage, true); } -void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int32_t order); +void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, int32_t order); void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo); void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList); @@ -115,6 +120,7 @@ SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode); EDealRes doTranslateTagExpr(SNode** pNode, void* pContext); int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo); int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId); +int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableListInfo* pTableListInfo); size_t getTableTagsBufLen(const SNodeList* pGroups); SArray* createSortInfo(SNodeList* pNodeList); diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 311d82c8a28bccfbc92344e75d85099a69f94289..b4e28403300249cc71e32f427505a8520e6f1297 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -122,7 +122,7 @@ typedef int32_t (*__optr_decode_fn_t)(struct SOperatorInfo* pOperator, char* res typedef int32_t (*__optr_open_fn_t)(struct SOperatorInfo* pOptr); typedef SSDataBlock* (*__optr_fn_t)(struct SOperatorInfo* pOptr); -typedef void (*__optr_close_fn_t)(void* param, int32_t num); +typedef void (*__optr_close_fn_t)(void* param); typedef int32_t (*__optr_explain_fn_t)(struct SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len); typedef struct STaskIdInfo { @@ -142,7 +142,11 @@ typedef struct { //TODO remove prepareStatus STqOffsetVal prepareStatus; // for tmq STqOffsetVal lastStatus; // for tmq - void* metaBlk; // for tmq fetching meta + SMqMetaRsp metaRsp; // for tmq fetching meta + int64_t snapshotVer; + + SSchemaWrapper *schema; + char tbName[TSDB_TABLE_NAME_LEN]; SSDataBlock* pullOverBlk; // for streaming SWalFilterCond cond; int64_t lastScanUid; @@ -150,6 +154,7 @@ typedef struct { SQueryTableDataCond tableCond; int64_t recoverStartVer; int64_t recoverEndVer; + SStreamState* pState; } SStreamTaskInfo; typedef struct { @@ -205,7 +210,8 @@ typedef struct SExprSupp { } SExprSupp; typedef struct SOperatorInfo { - uint8_t operatorType; + uint16_t operatorType; + int16_t resultDataBlockId; bool blocking; // block operator or not uint8_t status; // denote if current operator is completed char* name; // name, for debug purpose @@ -217,7 +223,6 @@ typedef struct SOperatorInfo { struct SOperatorInfo** pDownstream; // downstram pointer list int32_t numOfDownstream; // number of downstream. The value is always ONE expect for join operator SOperatorFpSet fpSet; - int16_t resultDataBlockId; } SOperatorInfo; typedef enum { @@ -296,10 +301,11 @@ enum { }; typedef struct SAggSupporter { - SHashObj* pResultRowHashTable; // quick locate the window object for each result - char* keyBuf; // window key buffer - SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file - int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row + SSHashObj* pResultRowHashTable; // quick locate the window object for each result + char* keyBuf; // window key buffer + SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file + int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row + int32_t currentPageId; // current write page id } SAggSupporter; typedef struct { @@ -324,7 +330,6 @@ typedef struct STableScanInfo { SQueryTableDataCond cond; int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan int32_t dataBlockLoadFlag; -// SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded. SSampleExecInfo sample; // sample execution info int32_t currentGroupId; int32_t currentTable; @@ -428,13 +433,14 @@ typedef struct SStreamAggSupporter { char* pKeyBuf; // window key buffer SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row + int32_t currentPageId; // buffer page that is active SSDataBlock* pScanBlock; } SStreamAggSupporter; typedef struct SessionWindowSupporter { SStreamAggSupporter* pStreamAggSup; int64_t gap; - uint8_t parentType; + uint16_t parentType; SAggSupporter* pIntervalAggSup; } SessionWindowSupporter; @@ -482,12 +488,24 @@ typedef struct SStreamScanInfo { STimeWindowAggSupp twAggSup; SSDataBlock* pUpdateDataRes; // status for tmq - // SSchemaWrapper schema; - SNodeList* pGroupTags; - SNode* pTagCond; - SNode* pTagIndexCond; + SNodeList* pGroupTags; + SNode* pTagCond; + SNode* pTagIndexCond; } SStreamScanInfo; +typedef struct { + // int8_t subType; + // bool withMeta; + // int64_t suid; + // int64_t snapVersion; + // void *metaInfo; + // void *dataInfo; + SVnode* vnode; + SSDataBlock pRes; // result SSDataBlock + STsdbReader* dataReader; + SSnapContext* sContext; +} SStreamRawScanInfo; + typedef struct SSysTableScanInfo { SRetrieveMetaTableRsp* pRsp; SRetrieveTableReq req; @@ -496,6 +514,7 @@ typedef struct SSysTableScanInfo { SReadHandle readHandle; int32_t accountId; const char* pUser; + bool sysInfo; bool showRewrite; SNode* pCondition; // db_name filter condition, to discard data that are not in current database SMTbCursor* pCur; // cursor for iterate the local table meta store. @@ -510,14 +529,14 @@ typedef struct SBlockDistInfo { SSDataBlock* pResBlock; void* pHandle; SReadHandle readHandle; - uint64_t uid; // table uid + uint64_t uid; // table uid } SBlockDistInfo; // todo remove this typedef struct SOptrBasicInfo { - SResultRowInfo resultRowInfo; - SSDataBlock* pRes; - bool mergeResultBlock; + SResultRowInfo resultRowInfo; + SSDataBlock* pRes; + bool mergeResultBlock; } SOptrBasicInfo; typedef struct SIntervalAggOperatorInfo { @@ -860,8 +879,8 @@ int32_t handleLimitOffset(SOperatorInfo *pOperator, SLimitInfo* pLimitInfo, SSDa bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo); void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimitInfo); -void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow* pWin, SColumnInfoData* pTimeWindowData, int32_t offset, - int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput, int32_t order); +void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData, int32_t offset, + int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput); int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, int32_t numOfOutput, SArray* pColList, char** pNextStart); void updateLoadRemoteInfo(SLoadRemoteDataInfo *pInfo, int32_t numOfRows, int32_t dataLen, int64_t startTs, @@ -907,47 +926,34 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode, SExecTaskInfo* pTaskInfo); SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortNode, SExecTaskInfo* pTaskInfo); SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** dowStreams, size_t numStreams, SMergePhysiNode* pMergePhysiNode, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createSortedMergeOperatorInfo(SOperatorInfo** downstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t num, SArray* pSortInfo, SArray* pGroupInfo, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pTableScanNode, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo); +SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pTableScanNode, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo); SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, STimeWindowAggSupp* pTwAggSupp, SIntervalPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, bool isStream); - -SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, - SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - bool mergeResultBlock, SExecTaskInfo* pTaskInfo); - -SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, - SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - SNode* pCondition, bool mergeResultBlocks, SExecTaskInfo* pTaskInfo); - +SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMergeIntervalPhysiNode* pIntervalPhyNode, + SExecTaskInfo* pTaskInfo); +SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SMergeAlignedIntervalPhysiNode* pNode, + SExecTaskInfo* pTaskInfo); SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild); -SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, - SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo); SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPhysiNode* pSessionNode, SExecTaskInfo* pTaskInfo); SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SArray* pGroupColList, SNode* pCondition, SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SReadHandle* readHandle, uint64_t uid, SBlockDistScanPhysiNode* pBlockScanNode, - SExecTaskInfo* pTaskInfo); +SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SReadHandle* readHandle, uint64_t uid, + SBlockDistScanPhysiNode* pBlockScanNode, SExecTaskInfo* pTaskInfo); SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode, SNode* pTagCond, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFillNode, SExecTaskInfo* pTaskInfo); - -SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols, - SSDataBlock* pResBlock, STimeWindowAggSupp *pTwAggSupp, int32_t tsSlotId, - SColumn* pStateKeyCol, SNode* pCondition, SExecTaskInfo* pTaskInfo); +SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pTaskInfo); +SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFillNode, SExecTaskInfo* pTaskInfo); +SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhysiNode* pStateNode, SExecTaskInfo* pTaskInfo); SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNode* pPartNode, SExecTaskInfo* pTaskInfo); - SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pNode, SExecTaskInfo* pTaskInfo); - SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SSortMergeJoinPhysiNode* pJoinNode, SExecTaskInfo* pTaskInfo); @@ -958,10 +964,6 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo); -#if 0 -SOperatorInfo* createTableSeqScanOperatorInfo(void* pTsdbReadHandle, STaskRuntimeEnv* pRuntimeEnv); -#endif - int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock* pSrcBlock, SqlFunctionCtx* pCtx, int32_t numOfOutput, SArray* pPseudoList); @@ -1009,7 +1011,7 @@ int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimary int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order); int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t size); -SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize); +SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize); SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endTs, uint64_t groupId, int64_t gap, int32_t* pIndex); SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, @@ -1019,7 +1021,7 @@ bool functionNeedToExecute(SqlFunctionCtx* pCtx); bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup); bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup); bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup); -void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid); +void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, int32_t uidCol, uint64_t* pID); void printDataBlock(SSDataBlock* pBlock, const char* flag); int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, diff --git a/source/libs/executor/inc/tsimplehash.h b/source/libs/executor/inc/tsimplehash.h index a56f8e8c049cca1cf606541ea8938f4f648bb32b..27191e3b7e674df4dcec9dabc7b8cc6fbb35f9f2 100644 --- a/source/libs/executor/inc/tsimplehash.h +++ b/source/libs/executor/inc/tsimplehash.h @@ -17,7 +17,6 @@ #define TDENGINE_TSIMPLEHASH_H #include "tarray.h" -#include "tlockfree.h" #ifdef __cplusplus extern "C" { @@ -27,6 +26,10 @@ typedef uint32_t (*_hash_fn_t)(const char *, uint32_t); typedef int32_t (*_equal_fn_t)(const void *, const void *, size_t len); typedef void (*_hash_free_fn_t)(void *); +/** + * @brief single thread hash + * + */ typedef struct SSHashObj SSHashObj; /** @@ -36,7 +39,7 @@ typedef struct SSHashObj SSHashObj; * @param fn hash function to generate the hash value * @return */ -SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn, size_t keyLen, size_t dataLen); +SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn); /** * return the size of hash table @@ -48,22 +51,26 @@ int32_t tSimpleHashGetSize(const SSHashObj *pHashObj); int32_t tSimpleHashPrint(const SSHashObj *pHashObj); /** - * put element into hash table, if the element with the same key exists, update it + * @brief put element into hash table, if the element with the same key exists, update it + * * @param pHashObj * @param key + * @param keyLen * @param data - * @return + * @param dataLen + * @return int32_t */ -int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, const void *data); +int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, const void *data, size_t dataLen); /** * return the payload data with the specified key * * @param pHashObj * @param key + * @param keyLen * @return */ -void *tSimpleHashGet(SSHashObj *pHashObj, const void *key); +void *tSimpleHashGet(SSHashObj *pHashObj, const void *key, size_t keyLen); /** * remove item with the specified key @@ -71,7 +78,19 @@ void *tSimpleHashGet(SSHashObj *pHashObj, const void *key); * @param key * @param keyLen */ -int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key); +int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key, size_t keyLen); + +/** + * remove item with the specified key during hash iterate + * + * @param pHashObj + * @param key + * @param keyLen + * @param pIter + * @param iter + * @return int32_t + */ +int32_t tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t keyLen, void **pIter, int32_t *iter); /** * Clear the hash table. @@ -92,13 +111,27 @@ void tSimpleHashCleanup(SSHashObj *pHashObj); */ size_t tSimpleHashGetMemSize(const SSHashObj *pHashObj); +#pragma pack(push, 4) +typedef struct SHNode{ + struct SHNode *next; + uint32_t keyLen : 20; + uint32_t dataLen : 12; + char data[]; +} SHNode; +#pragma pack(pop) + /** * Get the corresponding key information for a given data in hash table * @param data * @param keyLen * @return */ -void *tSimpleHashGetKey(const SSHashObj* pHashObj, void *data, size_t* keyLen); +static FORCE_INLINE void *tSimpleHashGetKey(void *data, size_t *keyLen) { + SHNode *node = (SHNode *)((char *)data - offsetof(SHNode, data)); + if (keyLen) *keyLen = node->keyLen; + + return POINTER_SHIFT(data, node->dataLen); +} /** * Create the hash table iterator @@ -112,4 +145,4 @@ void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter); #ifdef __cplusplus } #endif -#endif // TDENGINE_TSIMPLEHASH_H +#endif // TDENGINE_TSIMPLEHASH_H \ No newline at end of file diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index 94e4384b3025f0d2ecbbaafd9f92ad10aa84b926..94d9d0cadbd1cf21ac8303a4bee7b86da9695f3c 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -24,26 +24,28 @@ #include "tcompare.h" #include "thash.h" #include "ttypes.h" -#include "executorInt.h" -static SSDataBlock* doScanLastrow(SOperatorInfo* pOperator); -static void destroyLastrowScanOperator(void* param, int32_t numOfOutput); +static SSDataBlock* doScanCache(SOperatorInfo* pOperator); +static void destroyLastrowScanOperator(void* param); static int32_t extractTargetSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds); -SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo) { +SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SReadHandle* readHandle, + SExecTaskInfo* pTaskInfo) { + int32_t code = TSDB_CODE_SUCCESS; SLastrowScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SLastrowScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; goto _error; } pInfo->readHandle = *readHandle; - pInfo->pRes = createResDataBlock(pScanNode->scan.node.pOutputDataBlockDesc); + pInfo->pRes = createResDataBlock(pScanNode->scan.node.pOutputDataBlockDesc); int32_t numOfCols = 0; pInfo->pColMatchInfo = extractColMatchInfo(pScanNode->scan.pScanCols, pScanNode->scan.node.pOutputDataBlockDesc, &numOfCols, COL_MATCH_FROM_COL_ID); - int32_t code = extractTargetSlotId(pInfo->pColMatchInfo, pTaskInfo, &pInfo->pSlotIds); + code = extractTargetSlotId(pInfo->pColMatchInfo, pTaskInfo, &pInfo->pSlotIds); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -56,13 +58,17 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead // partition by tbname if (taosArrayGetSize(pTableList->pGroupList) == taosArrayGetSize(pTableList->pTableList)) { - pInfo->retrieveType = LASTROW_RETRIEVE_TYPE_ALL; - tsdbLastRowReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pTableList->pTableList, - taosArrayGetSize(pInfo->pColMatchInfo), &pInfo->pLastrowReader); + pInfo->retrieveType = CACHESCAN_RETRIEVE_TYPE_ALL|CACHESCAN_RETRIEVE_LAST_ROW; + code = tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pTableList->pTableList, + taosArrayGetSize(pInfo->pColMatchInfo), &pInfo->pLastrowReader); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + pInfo->pBufferredRes = createOneDataBlock(pInfo->pRes, false); blockDataEnsureCapacity(pInfo->pBufferredRes, pOperator->resultInfo.capacity); } else { // by tags - pInfo->retrieveType = LASTROW_RETRIEVE_TYPE_SINGLE; + pInfo->retrieveType = CACHESCAN_RETRIEVE_TYPE_SINGLE|CACHESCAN_RETRIEVE_LAST_ROW; } if (pScanNode->scan.pScanPseudoCols != NULL) { @@ -81,19 +87,19 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock); pOperator->fpSet = - createOperatorFpSet(operatorDummyOpenFn, doScanLastrow, NULL, NULL, destroyLastrowScanOperator, NULL, NULL, NULL); + createOperatorFpSet(operatorDummyOpenFn, doScanCache, NULL, NULL, destroyLastrowScanOperator, NULL, NULL, NULL); pOperator->cost.openCost = 0; return pOperator; _error: - pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY; - taosMemoryFree(pInfo); + pTaskInfo->code = code; + destroyLastrowScanOperator(pInfo); taosMemoryFree(pOperator); return NULL; } -SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) { +SSDataBlock* doScanCache(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; } @@ -110,14 +116,14 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) { blockDataCleanup(pInfo->pRes); // check if it is a group by tbname - if (pInfo->retrieveType == LASTROW_RETRIEVE_TYPE_ALL) { + if ((pInfo->retrieveType & CACHESCAN_RETRIEVE_TYPE_ALL) == CACHESCAN_RETRIEVE_TYPE_ALL) { if (pInfo->indexOfBufferedRes >= pInfo->pBufferredRes->info.rows) { blockDataCleanup(pInfo->pBufferredRes); taosArrayClear(pInfo->pUidList); - int32_t code = tsdbRetrieveLastRow(pInfo->pLastrowReader, pInfo->pBufferredRes, pInfo->pSlotIds, pInfo->pUidList); + int32_t code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pBufferredRes, pInfo->pSlotIds, pInfo->pUidList); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } // check for tag values @@ -173,11 +179,11 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) { while (pInfo->currentGroupIndex < totalGroups) { SArray* pGroupTableList = taosArrayGetP(pTableList->pGroupList, pInfo->currentGroupIndex); - tsdbLastRowReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pGroupTableList, + tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pGroupTableList, taosArrayGetSize(pInfo->pColMatchInfo), &pInfo->pLastrowReader); taosArrayClear(pInfo->pUidList); - int32_t code = tsdbRetrieveLastRow(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pUidList); + int32_t code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pUidList); if (code != TSDB_CODE_SUCCESS) { longjmp(pTaskInfo->env, code); } @@ -201,7 +207,7 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) { } } - tsdbLastrowReaderClose(pInfo->pLastrowReader); + tsdbCacherowsReaderClose(pInfo->pLastrowReader); return pInfo->pRes; } } @@ -211,7 +217,7 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) { } } -void destroyLastrowScanOperator(void* param, int32_t numOfOutput) { +void destroyLastrowScanOperator(void* param) { SLastrowScanInfo* pInfo = (SLastrowScanInfo*)param; blockDataDestroy(pInfo->pRes); taosMemoryFreeClear(param); diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 34247d3b47768a92ee4c0fa658dac3bb08b6f1fd..80c1494f8dc53c38bd8029eb69effd2ee270bf3a 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -31,20 +31,6 @@ void initResultRowInfo(SResultRowInfo* pResultRowInfo) { pResultRowInfo->cur.pageId = -1; } -void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo) { - if (pResultRowInfo == NULL) { - return; - } - - for (int32_t i = 0; i < pResultRowInfo->size; ++i) { - // if (pResultRowInfo->pResult[i]) { - // taosMemoryFreeClear(pResultRowInfo->pResult[i]->key); - // } - } -} - -bool isResultRowClosed(SResultRow* pRow) { return (pRow->closed == true); } - void closeResultRow(SResultRow* pResultRow) { pResultRow->closed = true; } // TODO refactor: use macro @@ -60,8 +46,8 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) { rowSize += pCtx[i].resDataInfo.interBufSize; } - rowSize += - (numOfOutput * sizeof(bool)); // expand rowSize to mark if col is null for top/bottom result(doSaveTupleData) + rowSize += (numOfOutput * sizeof(bool)); + // expand rowSize to mark if col is null for top/bottom result(saveTupleData) return rowSize; } @@ -97,7 +83,7 @@ int32_t resultrowComparAsc(const void* p1, const void* p2) { static int32_t resultrowComparDesc(const void* p1, const void* p2) { return resultrowComparAsc(p2, p1); } -void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int32_t order) { +void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, int32_t order) { if (pGroupResInfo->pRows != NULL) { taosArrayDestroy(pGroupResInfo->pRows); } @@ -106,9 +92,10 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int void* pData = NULL; pGroupResInfo->pRows = taosArrayInit(10, POINTER_BYTES); - size_t keyLen = 0; - while ((pData = taosHashIterate(pHashmap, pData)) != NULL) { - void* key = taosHashGetKey(pData, &keyLen); + size_t keyLen = 0; + int32_t iter = 0; + while ((pData = tSimpleHashIterate(pHashmap, pData, &iter)) != NULL) { + void* key = tSimpleHashGetKey(pData, &keyLen); SResKeyPos* p = taosMemoryMalloc(keyLen + sizeof(SResultRowPosition)); @@ -221,7 +208,7 @@ EDealRes doTranslateTagExpr(SNode** pNode, void* pContext) { STagVal tagVal = {0}; tagVal.cid = pSColumnNode->colId; - const char* p = metaGetTableTagVal(&mr->me, pSColumnNode->node.resType.type, &tagVal); + const char* p = metaGetTableTagVal(mr->me.ctbEntry.pTags, pSColumnNode->node.resType.type, &tagVal); if (p == NULL) { res->node.resType.type = TSDB_DATA_TYPE_NULL; } else if (pSColumnNode->node.resType.type == TSDB_DATA_TYPE_JSON) { @@ -298,6 +285,453 @@ int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, return TSDB_CODE_SUCCESS; } +typedef struct tagFilterAssist { + SHashObj* colHash; + int32_t index; + SArray* cInfoList; +} tagFilterAssist; + +static EDealRes getColumn(SNode** pNode, void* pContext) { + SColumnNode* pSColumnNode = NULL; + if (QUERY_NODE_COLUMN == nodeType((*pNode))) { + pSColumnNode = *(SColumnNode**)pNode; + } else if (QUERY_NODE_FUNCTION == nodeType((*pNode))) { + SFunctionNode* pFuncNode = *(SFunctionNode**)(pNode); + if (pFuncNode->funcType == FUNCTION_TYPE_TBNAME) { + pSColumnNode = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == pSColumnNode) { + return DEAL_RES_ERROR; + } + pSColumnNode->colId = -1; + pSColumnNode->colType = COLUMN_TYPE_TBNAME; + pSColumnNode->node.resType.type = TSDB_DATA_TYPE_VARCHAR; + pSColumnNode->node.resType.bytes = TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE; + nodesDestroyNode(*pNode); + *pNode = (SNode*)pSColumnNode; + } else { + return DEAL_RES_CONTINUE; + } + } else { + return DEAL_RES_CONTINUE; + } + + tagFilterAssist* pData = (tagFilterAssist*)pContext; + void* data = taosHashGet(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId)); + if (!data) { + taosHashPut(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId), pNode, sizeof((*pNode))); + pSColumnNode->slotId = pData->index++; + SColumnInfo cInfo = {.colId = pSColumnNode->colId, + .type = pSColumnNode->node.resType.type, + .bytes = pSColumnNode->node.resType.bytes}; +#if TAG_FILTER_DEBUG + qDebug("tagfilter build column info, slotId:%d, colId:%d, type:%d", pSColumnNode->slotId, cInfo.colId, cInfo.type); +#endif + taosArrayPush(pData->cInfoList, &cInfo); + } else { + SColumnNode* col = *(SColumnNode**)data; + pSColumnNode->slotId = col->slotId; + } + + return DEAL_RES_CONTINUE; +} + +static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarParam* pParam) { + SColumnInfoData* pColumnData = taosMemoryCalloc(1, sizeof(SColumnInfoData)); + if (pColumnData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return terrno; + } + + pColumnData->info.type = pType->type; + pColumnData->info.bytes = pType->bytes; + pColumnData->info.scale = pType->scale; + pColumnData->info.precision = pType->precision; + + int32_t code = colInfoDataEnsureCapacity(pColumnData, numOfRows); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + taosMemoryFree(pColumnData); + return terrno; + } + + pParam->columnData = pColumnData; + pParam->colAlloced = true; + return TSDB_CODE_SUCCESS; +} + +static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray* uidList, SNode* pTagCond) { + int32_t code = TSDB_CODE_SUCCESS; + SArray* pBlockList = NULL; + SSDataBlock* pResBlock = NULL; + SHashObj* tags = NULL; + SScalarParam output = {0}; + + tagFilterAssist ctx = {0}; + + ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); + if (ctx.colHash == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + ctx.index = 0; + ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); + if (ctx.cInfoList == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + + nodesRewriteExprPostOrder(&pTagCond, getColumn, (void*)&ctx); + + pResBlock = createDataBlock(); + if (pResBlock == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + + for (int32_t i = 0; i < taosArrayGetSize(ctx.cInfoList); ++i) { + SColumnInfoData colInfo = {{0}, 0}; + colInfo.info = *(SColumnInfo*)taosArrayGet(ctx.cInfoList, i); + blockDataAppendColInfo(pResBlock, &colInfo); + } + + // int64_t stt = taosGetTimestampUs(); + tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + code = metaGetTableTags(metaHandle, suid, uidList, tags); + if (code != TSDB_CODE_SUCCESS) { + qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), suid); + terrno = code; + goto end; + } + + int32_t rows = taosArrayGetSize(uidList); + if (rows == 0) { + goto end; + } + // int64_t stt1 = taosGetTimestampUs(); + // qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt); + + code = blockDataEnsureCapacity(pResBlock, rows); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + goto end; + } + + // int64_t st = taosGetTimestampUs(); + for (int32_t i = 0; i < rows; i++) { + int64_t* uid = taosArrayGet(uidList, i); + for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) { + SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j); + + if (pColInfo->info.colId == -1) { // tbname + char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + metaGetTableNameByUid(metaHandle, *uid, str); + colDataAppend(pColInfo, i, str, false); +#if TAG_FILTER_DEBUG + qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2); +#endif + } else { + void* tag = taosHashGet(tags, uid, sizeof(int64_t)); + ASSERT(tag); + STagVal tagVal = {0}; + tagVal.cid = pColInfo->info.colId; + const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal); + + if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { + colDataAppend(pColInfo, i, p, true); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { + colDataAppend(pColInfo, i, p, false); + } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { + char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1); + varDataSetLen(tmp, tagVal.nData); + memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); + colDataAppend(pColInfo, i, tmp, false); +#if TAG_FILTER_DEBUG + qDebug("tagfilter varch:%s", tmp + 2); +#endif + taosMemoryFree(tmp); + } else { + colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false); +#if TAG_FILTER_DEBUG + if (pColInfo->info.type == TSDB_DATA_TYPE_INT) { + qDebug("tagfilter int:%d", *(int*)(&tagVal.i64)); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) { + qDebug("tagfilter double:%f", *(double*)(&tagVal.i64)); + } +#endif + } + } + } + } + pResBlock->info.rows = rows; + + // int64_t st1 = taosGetTimestampUs(); + // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); + + pBlockList = taosArrayInit(2, POINTER_BYTES); + taosArrayPush(pBlockList, &pResBlock); + + SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; + code = createResultData(&type, rows, &output); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + qError("failed to create result, reason:%s", tstrerror(code)); + terrno = code; + goto end; + } + + code = scalarCalculate(pTagCond, pBlockList, &output); + if (code != TSDB_CODE_SUCCESS) { + qError("failed to calculate scalar, reason:%s", tstrerror(code)); + terrno = code; + goto end; + } + // int64_t st2 = taosGetTimestampUs(); + // qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1); + +end: + taosHashCleanup(tags); + taosHashCleanup(ctx.colHash); + taosArrayDestroy(ctx.cInfoList); + blockDataDestroy(pResBlock); + taosArrayDestroy(pBlockList); + return output.columnData; +} + +static void releaseColInfoData(void* pCol) { + if (pCol) { + SColumnInfoData* col = (SColumnInfoData*)pCol; + colDataDestroy(col); + taosMemoryFree(col); + } +} + +int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableListInfo* pTableListInfo) { + int32_t code = TSDB_CODE_SUCCESS; + SArray* pBlockList = NULL; + SSDataBlock* pResBlock = NULL; + SHashObj* tags = NULL; + SArray* uidList = NULL; + void* keyBuf = NULL; + SArray* groupData = NULL; + + int32_t rows = taosArrayGetSize(pTableListInfo->pTableList); + if (rows == 0) { + return TDB_CODE_SUCCESS; + } + + tagFilterAssist ctx = {0}; + ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); + if (ctx.colHash == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + ctx.index = 0; + ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); + if (ctx.cInfoList == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + + SNode* pNode = NULL; + FOREACH(pNode, group) { + nodesRewriteExprPostOrder(&pNode, getColumn, (void*)&ctx); + REPLACE_NODE(pNode); + } + + pResBlock = createDataBlock(); + if (pResBlock == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + + for (int32_t i = 0; i < taosArrayGetSize(ctx.cInfoList); ++i) { + SColumnInfoData colInfo = {{0}, 0}; + colInfo.info = *(SColumnInfo*)taosArrayGet(ctx.cInfoList, i); + blockDataAppendColInfo(pResBlock, &colInfo); + } + + uidList = taosArrayInit(rows, sizeof(uint64_t)); + for (int32_t i = 0; i < rows; ++i) { + STableKeyInfo* pkeyInfo = taosArrayGet(pTableListInfo->pTableList, i); + taosArrayPush(uidList, &pkeyInfo->uid); + } + + // int64_t stt = taosGetTimestampUs(); + tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + code = metaGetTableTags(metaHandle, pTableListInfo->suid, uidList, tags); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + // int64_t stt1 = taosGetTimestampUs(); + // qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt); + + code = blockDataEnsureCapacity(pResBlock, rows); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + + // int64_t st = taosGetTimestampUs(); + for (int32_t i = 0; i < rows; i++) { + int64_t* uid = taosArrayGet(uidList, i); + for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) { + SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j); + + if (pColInfo->info.colId == -1) { // tbname + char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + metaGetTableNameByUid(metaHandle, *uid, str); + colDataAppend(pColInfo, i, str, false); +#if TAG_FILTER_DEBUG + qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2); +#endif + } else { + void* tag = taosHashGet(tags, uid, sizeof(int64_t)); + ASSERT(tag); + STagVal tagVal = {0}; + tagVal.cid = pColInfo->info.colId; + const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal); + + if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { + colDataAppend(pColInfo, i, p, true); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { + colDataAppend(pColInfo, i, p, false); + } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { + char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1); + varDataSetLen(tmp, tagVal.nData); + memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); + colDataAppend(pColInfo, i, tmp, false); +#if TAG_FILTER_DEBUG + qDebug("tagfilter varch:%s", tmp + 2); +#endif + taosMemoryFree(tmp); + } else { + colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false); +#if TAG_FILTER_DEBUG + if (pColInfo->info.type == TSDB_DATA_TYPE_INT) { + qDebug("tagfilter int:%d", *(int*)(&tagVal.i64)); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) { + qDebug("tagfilter double:%f", *(double*)(&tagVal.i64)); + } +#endif + } + } + } + } + pResBlock->info.rows = rows; + + // int64_t st1 = taosGetTimestampUs(); + // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); + + pBlockList = taosArrayInit(2, POINTER_BYTES); + taosArrayPush(pBlockList, &pResBlock); + + groupData = taosArrayInit(2, POINTER_BYTES); + FOREACH(pNode, group) { + SScalarParam output = {0}; + + switch (nodeType(pNode)) { + case QUERY_NODE_VALUE: + break; + case QUERY_NODE_COLUMN: + case QUERY_NODE_OPERATOR: + case QUERY_NODE_FUNCTION: { + SExprNode* expNode = (SExprNode*)pNode; + code = createResultData(&expNode->resType, rows, &output); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + break; + } + default: + code = TSDB_CODE_OPS_NOT_SUPPORT; + goto end; + } + if (nodeType(pNode) == QUERY_NODE_COLUMN) { + SColumnNode* pSColumnNode = (SColumnNode*)pNode; + SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, pSColumnNode->slotId); + code = colDataAssign(output.columnData, pColInfo, rows, NULL); + } else if (nodeType(pNode) == QUERY_NODE_VALUE) { + continue; + } else { + code = scalarCalculate(pNode, pBlockList, &output); + } + if (code != TSDB_CODE_SUCCESS) { + releaseColInfoData(output.columnData); + goto end; + } + taosArrayPush(groupData, &output.columnData); + } + + int32_t keyLen = 0; + SNode* node; + FOREACH(node, group) { + SExprNode* pExpr = (SExprNode*)node; + keyLen += pExpr->resType.bytes; + } + + int32_t nullFlagSize = sizeof(int8_t) * LIST_LENGTH(group); + keyLen += nullFlagSize; + + keyBuf = taosMemoryCalloc(1, keyLen); + if (keyBuf == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + for (int i = 0; i < rows; i++) { + STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i); + + char* isNull = (char*)keyBuf; + char* pStart = (char*)keyBuf + sizeof(int8_t) * LIST_LENGTH(group); + for (int j = 0; j < taosArrayGetSize(groupData); j++) { + SColumnInfoData* pValue = (SColumnInfoData*)taosArrayGetP(groupData, j); + + if (colDataIsNull_s(pValue, i)) { + isNull[j] = 1; + } else { + isNull[j] = 0; + char* data = colDataGetData(pValue, i); + if (pValue->info.type == TSDB_DATA_TYPE_JSON) { + if (tTagIsJson(data)) { + code = TSDB_CODE_QRY_JSON_IN_GROUP_ERROR; + goto end; + } + if (tTagIsJsonNull(data)) { + isNull[j] = 1; + continue; + } + int32_t len = getJsonValueLen(data); + memcpy(pStart, data, len); + pStart += len; + } else if (IS_VAR_DATA_TYPE(pValue->info.type)) { + memcpy(pStart, data, varDataTLen(data)); + pStart += varDataTLen(data); + } else { + memcpy(pStart, data, pValue->info.bytes); + pStart += pValue->info.bytes; + } + } + } + + int32_t len = (int32_t)(pStart - (char*)keyBuf); + info->groupId = calcGroupId(keyBuf, len); + taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &info->groupId, sizeof(uint64_t)); + } + + // int64_t st2 = taosGetTimestampUs(); + // qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1); + +end: + taosMemoryFreeClear(keyBuf); + taosHashCleanup(tags); + taosHashCleanup(ctx.colHash); + taosArrayDestroy(ctx.cInfoList); + blockDataDestroy(pResBlock); + taosArrayDestroy(pBlockList); + taosArrayDestroy(uidList); + taosArrayDestroyP(groupData, releaseColInfoData); + return code; +} + int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo) { int32_t code = TSDB_CODE_SUCCESS; @@ -308,63 +742,72 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, } uint64_t tableUid = pScanNode->uid; - pListInfo->suid = pScanNode->suid; + SArray* res = taosArrayInit(8, sizeof(uint64_t)); if (pScanNode->tableType == TSDB_SUPER_TABLE) { if (pTagIndexCond) { SIndexMetaArg metaArg = { .metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid}; - SArray* res = taosArrayInit(8, sizeof(uint64_t)); + // int64_t stt = taosGetTimestampUs(); SIdxFltStatus status = SFLT_NOT_INDEX; code = doFilterTag(pTagIndexCond, &metaArg, res, &status); if (code != 0 || status == SFLT_NOT_INDEX) { qError("failed to get tableIds from index, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid); - // code = TSDB_CODE_INDEX_REBUILDING; - code = vnodeGetAllTableList(pVnode, tableUid, pListInfo->pTableList); - } else { - qDebug("success to get tableIds, size:%d, suid:%" PRIu64, (int)taosArrayGetSize(res), tableUid); + code = TDB_CODE_SUCCESS; } - for (int i = 0; i < taosArrayGetSize(res); i++) { - STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0}; - taosArrayPush(pListInfo->pTableList, &info); - } - taosArrayDestroy(res); - } else { - code = vnodeGetAllTableList(pVnode, tableUid, pListInfo->pTableList); - } - - if (code != TSDB_CODE_SUCCESS) { - qError("failed to get tableIds, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid); - terrno = code; - return code; + // int64_t stt1 = taosGetTimestampUs(); + // qDebug("generate table list, cost:%ld us", stt1-stt); + } else if (!pTagCond) { + vnodeGetCtbIdList(pVnode, pScanNode->suid, res); } } else { // Create one table group. - STableKeyInfo info = {.uid = tableUid, .groupId = 0}; - taosArrayPush(pListInfo->pTableList, &info); + if (metaIsTableExist(metaHandle, tableUid)) { + taosArrayPush(res, &tableUid); + } } if (pTagCond) { - int32_t i = 0; - while (i < taosArrayGetSize(pListInfo->pTableList)) { - STableKeyInfo* info = taosArrayGet(pListInfo->pTableList, i); - - bool qualified = true; - code = isQualifiedTable(info, pTagCond, metaHandle, &qualified); - if (code != TSDB_CODE_SUCCESS) { - return code; - } + terrno = TDB_CODE_SUCCESS; + SColumnInfoData* pColInfoData = getColInfoResult(metaHandle, pListInfo->suid, res, pTagCond); + if (terrno != TDB_CODE_SUCCESS) { + colDataDestroy(pColInfoData); + taosMemoryFreeClear(pColInfoData); + taosArrayDestroy(res); + qError("failed to getColInfoResult, code: %s", tstrerror(terrno)); + return terrno; + } - if (!qualified) { - taosArrayRemove(pListInfo->pTableList, i); + int32_t i = 0; + int32_t j = 0; + int32_t len = taosArrayGetSize(res); + while (i < taosArrayGetSize(res) && j < len && pColInfoData) { + void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes); + + int64_t* uid = taosArrayGet(res, i); + qDebug("tagfilter get uid:%ld, res:%d", *uid, *(bool*)var); + if (*(bool*)var == false) { + taosArrayRemove(res, i); + j++; continue; } i++; + j++; } + colDataDestroy(pColInfoData); + taosMemoryFreeClear(pColInfoData); + } + + for (int i = 0; i < taosArrayGetSize(res); i++) { + STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0}; + taosArrayPush(pListInfo->pTableList, &info); + qDebug("tagfilter get uid:%ld", info.uid); } + taosArrayDestroy(res); + pListInfo->pGroupList = taosArrayInit(4, POINTER_BYTES); if (pListInfo->pGroupList == NULL) { return TSDB_CODE_OUT_OF_MEMORY; @@ -391,7 +834,10 @@ size_t getTableTagsBufLen(const SNodeList* pGroups) { int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId) { SMetaReader mr = {0}; metaReaderInit(&mr, pMeta, 0); - metaGetTableEntryByUid(&mr, uid); + if (metaGetTableEntryByUid(&mr, uid) != 0) { // table not exist + metaReaderClear(&mr); + return TSDB_CODE_PAR_TABLE_NOT_EXIST; + } SNodeList* groupNew = nodesCloneList(pGroupNode); @@ -546,7 +992,7 @@ static SResSchema createResSchema(int32_t type, int32_t bytes, int32_t slotId, i return s; } -static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDataType* pType) { +static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDataType* pType, EColumnType colType) { SColumn* pCol = taosMemoryCalloc(1, sizeof(SColumn)); if (pCol == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -560,7 +1006,7 @@ static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDa pCol->scale = pType->scale; pCol->precision = pType->precision; pCol->dataBlockId = blockId; - + pCol->colType = colType; return pCol; } @@ -604,7 +1050,8 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* SDataType* pType = &pColNode->node.resType; pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale, pType->precision, pColNode->colName); - pExp->base.pParam[0].pCol = createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType); + pExp->base.pParam[0].pCol = + createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType, pColNode->colType); pExp->base.pParam[0].type = FUNC_PARAM_TYPE_COLUMN; } else if (type == QUERY_NODE_VALUE) { pExp->pExpr->nodeType = QUERY_NODE_VALUE; @@ -656,7 +1103,8 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* SColumnNode* pcn = (SColumnNode*)p1; pExp->base.pParam[j].type = FUNC_PARAM_TYPE_COLUMN; - pExp->base.pParam[j].pCol = createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType); + pExp->base.pParam[j].pCol = + createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType, pcn->colType); } else if (p1->type == QUERY_NODE_VALUE) { SValueNode* pvn = (SValueNode*)p1; pExp->base.pParam[j].type = FUNC_PARAM_TYPE_VALUE; @@ -730,7 +1178,6 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, SqlFunctionCtx* pCtx = &pFuncCtx[i]; pCtx->functionId = -1; - pCtx->curBufPage = -1; pCtx->pExpr = pExpr; if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) { @@ -771,9 +1218,10 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, pCtx->start.key = INT64_MIN; pCtx->end.key = INT64_MIN; pCtx->numOfParams = pExpr->base.numOfParams; - pCtx->increase = false; + pCtx->isStream = false; pCtx->param = pFunct->pParam; + pCtx->saveHandle.currentPage = -1; } for (int32_t i = 1; i < numOfOutput; ++i) { @@ -852,6 +1300,7 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi pCond->type = TIMEWINDOW_RANGE_CONTAINED; pCond->startVersion = -1; pCond->endVersion = -1; + pCond->schemaVersion = -1; // pCond->type = pTableScanNode->scanFlag; int32_t j = 0; diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 7115ad85a50884d21f496a900d21f0e954c07ea0..f1ac9ef8b18ecbd74ee200f069281c36a7582fcb 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -55,7 +55,7 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu taosArrayClear(pInfo->pBlockLists); if (type == STREAM_INPUT__MERGED_SUBMIT) { - ASSERT(numOfBlocks > 1); + // ASSERT(numOfBlocks > 1); for (int32_t i = 0; i < numOfBlocks; i++) { SSubmitReq* pReq = *(void**)POINTER_SHIFT(input, i * sizeof(void*)); taosArrayPush(pInfo->pBlockLists, &pReq); @@ -139,8 +139,24 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols, SSchemaWrapper** pSchema) { if (msg == NULL) { - // TODO create raw scan - return NULL; + // create raw scan + + SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo)); + if (NULL == pTaskInfo) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED); + + pTaskInfo->cost.created = taosGetTimestampMs(); + pTaskInfo->execModel = OPTR_EXEC_MODEL_QUEUE; + pTaskInfo->pRoot = createRawScanOperatorInfo(readers, pTaskInfo); + if (NULL == pTaskInfo->pRoot) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + taosMemoryFree(pTaskInfo); + return NULL; + } + return pTaskInfo; } struct SSubplan* pPlan = NULL; @@ -348,16 +364,18 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, taosThreadOnce(&initPoolOnce, initRefPool); atexit(cleanupRefPool); - qDebug("start to create subplan task, TID:0x%"PRIx64 " QID:0x%"PRIx64, taskId, pSubplan->id.queryId); + qDebug("start to create subplan task, TID:0x%" PRIx64 " QID:0x%" PRIx64, taskId, pSubplan->id.queryId); int32_t code = createExecTaskInfoImpl(pSubplan, pTask, readHandle, taskId, sql, model); if (code != TSDB_CODE_SUCCESS) { + qError("failed to createExecTaskInfoImpl, code: %s", tstrerror(code)); goto _error; } SDataSinkMgtCfg cfg = {.maxDataBlockNum = 10000, .maxDataBlockNumPerQuery = 5000}; code = dsDataSinkMgtInit(&cfg); if (code != TSDB_CODE_SUCCESS) { + qError("failed to dsDataSinkMgtInit, code: %s", tstrerror(code)); goto _error; } @@ -365,6 +383,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, void* pSinkParam = NULL; code = createDataSinkParam(pSubplan->pDataSink, &pSinkParam, pTaskInfo, readHandle); if (code != TSDB_CODE_SUCCESS) { + qError("failed to createDataSinkParam, code: %s", tstrerror(code)); goto _error; } @@ -374,7 +393,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, } } - qDebug("subplan task create completed, TID:0x%"PRIx64 " QID:0x%"PRIx64, taskId, pSubplan->id.queryId); + qDebug("subplan task create completed, TID:0x%" PRIx64 " QID:0x%" PRIx64, taskId, pSubplan->id.queryId); _error: // if failed to add ref for all tables in this query, abort current query @@ -427,7 +446,7 @@ int waitMoment(SQInfo* pQInfo) { #endif static void freeBlock(void* param) { - SSDataBlock* pBlock = *(SSDataBlock**) param; + SSDataBlock* pBlock = *(SSDataBlock**)param; blockDataDestroy(pBlock); } @@ -467,12 +486,12 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds) { qDebug("%s execTask is launched", GET_TASKID(pTaskInfo)); - int32_t current = 0; + int32_t current = 0; SSDataBlock* pRes = NULL; int64_t st = taosGetTimestampUs(); - while((pRes = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot)) != NULL) { + while ((pRes = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot)) != NULL) { SSDataBlock* p = createOneDataBlock(pRes, true); current += p->info.rows; ASSERT(p->info.rows > 0); @@ -494,7 +513,7 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds) { uint64_t total = pTaskInfo->pRoot->resultInfo.totalRows; qDebug("%s task suspended, %d rows in %d blocks returned, total:%" PRId64 " rows, in sinkNode:%d, elapsed:%.2f ms", - GET_TASKID(pTaskInfo), current, (int32_t) taosArrayGetSize(pResList), total, 0, el / 1000.0); + GET_TASKID(pTaskInfo), current, (int32_t)taosArrayGetSize(pResList), total, 0, el / 1000.0); atomic_store_64(&pTaskInfo->owner, 0); return pTaskInfo->code; @@ -632,7 +651,7 @@ int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner) { SOperatorInfo* pOperator = pTaskInfo->pRoot; while (1) { - uint8_t type = pOperator->operatorType; + uint16_t type = pOperator->operatorType; if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { *scanner = pOperator->info; return 0; @@ -666,15 +685,26 @@ void* qExtractReaderFromStreamScanner(void* scanner) { return (void*)pInfo->tqReader; } -const SSchemaWrapper* qExtractSchemaFromStreamScanner(void* scanner) { - SStreamScanInfo* pInfo = scanner; - return pInfo->tqReader->pSchemaWrapper; +const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo) { + SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; + return pTaskInfo->streamInfo.schema; +} + +const char* qExtractTbnameFromTask(qTaskInfo_t tinfo) { + SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; + return pTaskInfo->streamInfo.tbName; +} + +SMqMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo) { + SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; + ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE); + return &pTaskInfo->streamInfo.metaRsp; } -void* qStreamExtractMetaMsg(qTaskInfo_t tinfo) { +int64_t qStreamExtractPrepareUid(qTaskInfo_t tinfo) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE); - return pTaskInfo->streamInfo.metaBlk; + return pTaskInfo->streamInfo.prepareStatus.uid; } int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) { @@ -684,102 +714,166 @@ int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) { return 0; } -int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) { +int32_t initQueryTableDataCondForTmq(SQueryTableDataCond* pCond, SSnapContext* sContext, SMetaTableInfo mtInfo) { + memset(pCond, 0, sizeof(SQueryTableDataCond)); + pCond->order = TSDB_ORDER_ASC; + pCond->numOfCols = mtInfo.schema->nCols; + pCond->colList = taosMemoryCalloc(pCond->numOfCols, sizeof(SColumnInfo)); + if (pCond->colList == NULL) { + terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; + return terrno; + } + + pCond->twindows = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX}; + pCond->suid = mtInfo.suid; + pCond->type = TIMEWINDOW_RANGE_CONTAINED; + pCond->startVersion = -1; + pCond->endVersion = sContext->snapVersion; + pCond->schemaVersion = sContext->snapVersion; + + for (int32_t i = 0; i < pCond->numOfCols; ++i) { + pCond->colList[i].type = mtInfo.schema->pSchema[i].type; + pCond->colList[i].bytes = mtInfo.schema->pSchema[i].bytes; + pCond->colList[i].colId = mtInfo.schema->pSchema[i].colId; + } + + return TSDB_CODE_SUCCESS; +} + +int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; SOperatorInfo* pOperator = pTaskInfo->pRoot; ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE); pTaskInfo->streamInfo.prepareStatus = *pOffset; - if (!tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) { - while (1) { - uint8_t type = pOperator->operatorType; - pOperator->status = OP_OPENED; - // TODO add more check - if (type != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { - ASSERT(pOperator->numOfDownstream == 1); - pOperator = pOperator->pDownstream[0]; - } + if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) { + return 0; + } + if (subType == TOPIC_SUB_TYPE__COLUMN) { + uint16_t type = pOperator->operatorType; + pOperator->status = OP_OPENED; + // TODO add more check + if (type != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { + ASSERT(pOperator->numOfDownstream == 1); + pOperator = pOperator->pDownstream[0]; + } - SStreamScanInfo* pInfo = pOperator->info; - if (pOffset->type == TMQ_OFFSET__LOG) { - STableScanInfo* pTSInfo = pInfo->pTableScanOp->info; - tsdbReaderClose(pTSInfo->dataReader); - pTSInfo->dataReader = NULL; + SStreamScanInfo* pInfo = pOperator->info; + if (pOffset->type == TMQ_OFFSET__LOG) { + STableScanInfo* pTSInfo = pInfo->pTableScanOp->info; + tsdbReaderClose(pTSInfo->dataReader); + pTSInfo->dataReader = NULL; #if 0 - if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) && - pInfo->tqReader->pWalReader->curVersion != pOffset->version) { - qError("prepare scan ver %" PRId64 " actual ver %" PRId64 ", last %" PRId64, pOffset->version, - pInfo->tqReader->pWalReader->curVersion, pTaskInfo->streamInfo.lastStatus.version); - ASSERT(0); - } + if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) && + pInfo->tqReader->pWalReader->curVersion != pOffset->version) { + qError("prepare scan ver %" PRId64 " actual ver %" PRId64 ", last %" PRId64, pOffset->version, + pInfo->tqReader->pWalReader->curVersion, pTaskInfo->streamInfo.lastStatus.version); + ASSERT(0); + } #endif - if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) { + if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) { + return -1; + } + ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1); + } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) { + /*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/ + int64_t uid = pOffset->uid; + int64_t ts = pOffset->ts; + + if (uid == 0) { + if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) { + STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0); + uid = pTableInfo->uid; + ts = INT64_MIN; + } else { return -1; } - ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1); - } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) { - /*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/ - int64_t uid = pOffset->uid; - int64_t ts = pOffset->ts; - - if (uid == 0) { - if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) { - STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0); - uid = pTableInfo->uid; - ts = INT64_MIN; - } else { - return -1; - } - } + } - /*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/ - /*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/ - STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info; - int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList); + /*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/ + /*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/ + STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info; + int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList); #ifndef NDEBUG - - qDebug("switch to next table %" PRId64 " (cursor %d), %" PRId64 " rows returned", uid, - pTableScanInfo->currentTable, pInfo->pTableScanOp->resultInfo.totalRows); - pInfo->pTableScanOp->resultInfo.totalRows = 0; + qDebug("switch to next table %" PRId64 " (cursor %d), %" PRId64 " rows returned", uid, + pTableScanInfo->currentTable, pInfo->pTableScanOp->resultInfo.totalRows); + pInfo->pTableScanOp->resultInfo.totalRows = 0; #endif - bool found = false; - for (int32_t i = 0; i < tableSz; i++) { - STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i); - if (pTableInfo->uid == uid) { - found = true; - pTableScanInfo->currentTable = i; - break; - } + bool found = false; + for (int32_t i = 0; i < tableSz; i++) { + STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i); + if (pTableInfo->uid == uid) { + found = true; + pTableScanInfo->currentTable = i; + break; } + } - // TODO after dropping table, table may be not found - ASSERT(found); + // TODO after dropping table, table may be not found + ASSERT(found); - if (pTableScanInfo->dataReader == NULL) { - if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond, - pTaskInfo->tableqinfoList.pTableList, &pTableScanInfo->dataReader, NULL) < 0 || - pTableScanInfo->dataReader == NULL) { - ASSERT(0); - } + if (pTableScanInfo->dataReader == NULL) { + if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond, + pTaskInfo->tableqinfoList.pTableList, &pTableScanInfo->dataReader, NULL) < 0 || + pTableScanInfo->dataReader == NULL) { + ASSERT(0); } + } - tsdbSetTableId(pTableScanInfo->dataReader, uid); - int64_t oldSkey = pTableScanInfo->cond.twindows.skey; - pTableScanInfo->cond.twindows.skey = ts + 1; - tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond); - pTableScanInfo->cond.twindows.skey = oldSkey; - pTableScanInfo->scanTimes = 0; + tsdbSetTableId(pTableScanInfo->dataReader, uid); + int64_t oldSkey = pTableScanInfo->cond.twindows.skey; + pTableScanInfo->cond.twindows.skey = ts + 1; + tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond); + pTableScanInfo->cond.twindows.skey = oldSkey; + pTableScanInfo->scanTimes = 0; - qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid, - ts, pTableScanInfo->currentTable, tableSz); - /*}*/ + qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid, + ts, pTableScanInfo->currentTable, tableSz); + /*}*/ + } else { + ASSERT(0); + } + } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) { + SStreamRawScanInfo* pInfo = pOperator->info; + SSnapContext* sContext = pInfo->sContext; + if (setForSnapShot(sContext, pOffset->uid) != 0) { + qError("setDataForSnapShot error. uid:%" PRIi64, pOffset->uid); + return -1; + } - } else { - ASSERT(0); - } - return 0; + SMetaTableInfo mtInfo = getUidfromSnapShot(sContext); + tsdbReaderClose(pInfo->dataReader); + pInfo->dataReader = NULL; + cleanupQueryTableDataCond(&pTaskInfo->streamInfo.tableCond); + taosArrayDestroy(pTaskInfo->tableqinfoList.pTableList); + if (mtInfo.uid == 0) return 0; // no data + + initQueryTableDataCondForTmq(&pTaskInfo->streamInfo.tableCond, sContext, mtInfo); + pTaskInfo->streamInfo.tableCond.twindows.skey = pOffset->ts; + pTaskInfo->tableqinfoList.pTableList = taosArrayInit(1, sizeof(STableKeyInfo)); + taosArrayPush(pTaskInfo->tableqinfoList.pTableList, &(STableKeyInfo){.uid = mtInfo.uid, .groupId = 0}); + tsdbReaderOpen(pInfo->vnode, &pTaskInfo->streamInfo.tableCond, pTaskInfo->tableqinfoList.pTableList, + &pInfo->dataReader, NULL); + + strcpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName); + tDeleteSSchemaWrapper(pTaskInfo->streamInfo.schema); + pTaskInfo->streamInfo.schema = mtInfo.schema; + + qDebug("tmqsnap qStreamPrepareScan snapshot data uid %ld ts %ld", mtInfo.uid, pOffset->ts); + } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_META) { + SStreamRawScanInfo* pInfo = pOperator->info; + SSnapContext* sContext = pInfo->sContext; + if (setForSnapShot(sContext, pOffset->uid) != 0) { + qError("setForSnapShot error. uid:%" PRIi64 " ,version:%" PRIi64, pOffset->uid); + return -1; } + qDebug("tmqsnap qStreamPrepareScan snapshot meta uid %ld ts %ld", pOffset->uid); + } else if (pOffset->type == TMQ_OFFSET__LOG) { + SStreamRawScanInfo* pInfo = pOperator->info; + tsdbReaderClose(pInfo->dataReader); + pInfo->dataReader = NULL; + qDebug("tmqsnap qStreamPrepareScan snapshot log"); } return 0; } diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 86b90364548a3a82599c99ee2a34c29593373ba4..f3ff13ef856e1beee6d0653a811fb050a8ab1468 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -86,19 +86,17 @@ static int32_t getExprFunctionId(SExprInfo* pExprInfo) { return 0; } -static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes); - -static void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pSDataBlock); +static void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pBlock); static void releaseQueryBuf(size_t numOfTables); -static void destroyFillOperatorInfo(void* param, int32_t numOfOutput); -static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput); -static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput); -static void destroyAggOperatorInfo(void* param, int32_t numOfOutput); +static void destroyFillOperatorInfo(void* param); +static void destroyProjectOperatorInfo(void* param); +static void destroyOrderOperatorInfo(void* param); +static void destroyAggOperatorInfo(void* param); -static void destroyIntervalOperatorInfo(void* param, int32_t numOfOutput); -static void destroyExchangeOperatorInfo(void* param, int32_t numOfOutput); +static void destroyIntervalOperatorInfo(void* param); +static void destroyExchangeOperatorInfo(void* param); static void destroyOperatorInfo(SOperatorInfo* pOperator); @@ -142,20 +140,6 @@ static int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, static void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size); static void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId); -// setup the output buffer for each operator -static bool hasNull(SColumn* pColumn, SColumnDataAgg* pStatis) { - if (TSDB_COL_IS_TAG(pColumn->flag) || TSDB_COL_IS_UD_COL(pColumn->flag) || - pColumn->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { - return false; - } - - if (pStatis != NULL && pStatis->numOfNull == 0) { - return false; - } - - return true; -} - #if 0 static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, char* pData, int16_t bytes, bool masterscan, uint64_t uid) { @@ -195,26 +179,23 @@ static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pR } #endif -SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize) { +SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize) { SFilePage* pData = NULL; // in the first scan, new space needed for results int32_t pageId = -1; - SIDList list = getDataBufPagesIdList(pResultBuf, tableGroupId); - - if (taosArrayGetSize(list) == 0) { - pData = getNewBufPage(pResultBuf, tableGroupId, &pageId); + if (*currentPageId == -1) { + pData = getNewBufPage(pResultBuf, &pageId); pData->num = sizeof(SFilePage); } else { - SPageInfo* pi = getLastPageInfo(list); - pData = getBufPage(pResultBuf, getPageId(pi)); - pageId = getPageId(pi); + pData = getBufPage(pResultBuf, *currentPageId); + pageId = *currentPageId; if (pData->num + interBufSize > getBufPageSize(pResultBuf)) { // release current page first, and prepare the next one - releaseBufPageInfo(pResultBuf, pi); + releaseBufPage(pResultBuf, pData); - pData = getNewBufPage(pResultBuf, tableGroupId, &pageId); + pData = getNewBufPage(pResultBuf, &pageId); if (pData != NULL) { pData->num = sizeof(SFilePage); } @@ -231,9 +212,9 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int SResultRow* pResultRow = (SResultRow*)((char*)pData + pData->num); pResultRow->pageId = pageId; pResultRow->offset = (int32_t)pData->num; + *currentPageId = pageId; pData->num += interBufSize; - return pResultRow; } @@ -250,7 +231,7 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId); SResultRowPosition* p1 = - (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); + (SResultRowPosition*)tSimpleHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); SResultRow* pResult = NULL; @@ -272,9 +253,6 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR // 1. close current opened time window if (pResultRowInfo->cur.pageId != -1 && ((pResult == NULL) || (pResult->pageId != pResultRowInfo->cur.pageId))) { -#ifdef BUF_PAGE_DEBUG - qDebug("page_1"); -#endif SResultRowPosition pos = pResultRowInfo->cur; SFilePage* pPage = getBufPage(pResultBuf, pos.pageId); releaseBufPage(pResultBuf, pPage); @@ -282,18 +260,15 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR // allocate a new buffer page if (pResult == NULL) { -#ifdef BUF_PAGE_DEBUG - qDebug("page_2"); -#endif ASSERT(pSup->resultRowSize > 0); - pResult = getNewResultRow(pResultBuf, groupId, pSup->resultRowSize); + pResult = getNewResultRow(pResultBuf, &pSup->currentPageId, pSup->resultRowSize); initResultRow(pResult); // add a new result set for a new group SResultRowPosition pos = {.pageId = pResult->pageId, .offset = pResult->offset}; - taosHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos, - sizeof(SResultRowPosition)); + tSimpleHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos, + sizeof(SResultRowPosition)); } // 2. set the new time window to be the new active time window @@ -301,8 +276,8 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR // too many time window in query if (pTaskInfo->execModel == OPTR_EXEC_MODEL_BATCH && - taosHashGetSize(pSup->pResultRowHashTable) > MAX_INTERVAL_TIME_WINDOW) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW); + tSimpleHashGetSize(pSup->pResultRowHashTable) > MAX_INTERVAL_TIME_WINDOW) { + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW); } return pResult; @@ -318,10 +293,10 @@ static int32_t addNewWindowResultBuf(SResultRow* pWindowRes, SDiskbasedBuf* pRes // in the first scan, new space needed for results int32_t pageId = -1; - SIDList list = getDataBufPagesIdList(pResultBuf, tid); + SIDList list = getDataBufPagesIdList(pResultBuf); if (taosArrayGetSize(list) == 0) { - pData = getNewBufPage(pResultBuf, tid, &pageId); + pData = getNewBufPage(pResultBuf, &pageId); pData->num = sizeof(SFilePage); } else { SPageInfo* pi = getLastPageInfo(list); @@ -332,7 +307,7 @@ static int32_t addNewWindowResultBuf(SResultRow* pWindowRes, SDiskbasedBuf* pRes // release current page first, and prepare the next one releaseBufPageInfo(pResultBuf, pi); - pData = getNewBufPage(pResultBuf, tid, &pageId); + pData = getNewBufPage(pResultBuf, &pageId); if (pData != NULL) { pData->num = sizeof(SFilePage); } @@ -372,15 +347,30 @@ void initExecTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pQueryWindow void cleanupExecTimeWindowInfo(SColumnInfoData* pColData) { colDataDestroy(pColData); } -void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow* pWin, - SColumnInfoData* pTimeWindowData, int32_t offset, int32_t forwardStep, TSKEY* tsCol, - int32_t numOfTotal, int32_t numOfOutput, int32_t order) { +typedef struct { + bool hasAgg; + int32_t numOfRows; + int32_t startOffset; +} SFunctionCtxStatus; + +static void functionCtxSave(SqlFunctionCtx* pCtx, SFunctionCtxStatus* pStatus) { + pStatus->hasAgg = pCtx->input.colDataAggIsSet; + pStatus->numOfRows = pCtx->input.numOfRows; + pStatus->startOffset = pCtx->input.startRowIndex; +} + +static void functionCtxRestore(SqlFunctionCtx* pCtx, SFunctionCtxStatus* pStatus) { + pCtx->input.colDataAggIsSet = pStatus->hasAgg; + pCtx->input.numOfRows = pStatus->numOfRows; + pCtx->input.startRowIndex = pStatus->startOffset; +} + +void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData, int32_t offset, + int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput) { for (int32_t k = 0; k < numOfOutput; ++k) { // keep it temporarily - // todo no need this?? - bool hasAgg = pCtx[k].input.colDataAggIsSet; - int32_t numOfRows = pCtx[k].input.numOfRows; - int32_t startOffset = pCtx[k].input.startRowIndex; + SFunctionCtxStatus status = {0}; + functionCtxSave(&pCtx[k], &status); pCtx[k].input.startRowIndex = offset; pCtx[k].input.numOfRows = forwardStep; @@ -413,14 +403,12 @@ void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow if (code != TSDB_CODE_SUCCESS) { qError("%s apply functions error, code: %s", GET_TASKID(taskInfo), tstrerror(code)); taskInfo->code = code; - longjmp(taskInfo->env, code); + T_LONG_JMP(taskInfo->env, code); } } // restore it - pCtx[k].input.colDataAggIsSet = hasAgg; - pCtx[k].input.startRowIndex = startOffset; - pCtx[k].input.numOfRows = numOfRows; + functionCtxRestore(&pCtx[k], &status); } } } @@ -606,7 +594,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc if (pExpr[k].pExpr->nodeType == QUERY_NODE_COLUMN) { // it is a project query SColumnInfoData* pColInfoData = taosArrayGet(pResult->pDataBlock, outputSlotId); if (pResult->info.rows > 0 && !createNewColModel) { - colDataMergeCol(pColInfoData, pResult->info.rows, &pResult->info.capacity, pInputData->pData[0], + colDataMergeCol(pColInfoData, pResult->info.rows, (int32_t*)&pResult->info.capacity, pInputData->pData[0], pInputData->numOfRows); } else { colDataAssign(pColInfoData, pInputData->pData[0], pInputData->numOfRows, &pResult->info); @@ -644,7 +632,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc int32_t startOffset = createNewColModel ? 0 : pResult->info.rows; ASSERT(pResult->info.capacity > 0); - colDataMergeCol(pResColData, startOffset, &pResult->info.capacity, &idata, dest.numOfRows); + colDataMergeCol(pResColData, startOffset, (int32_t*)&pResult->info.capacity, &idata, dest.numOfRows); colDataDestroy(&idata); numOfRows = dest.numOfRows; @@ -709,7 +697,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc int32_t startOffset = createNewColModel ? 0 : pResult->info.rows; ASSERT(pResult->info.capacity > 0); - colDataMergeCol(pResColData, startOffset, &pResult->info.capacity, &idata, dest.numOfRows); + colDataMergeCol(pResColData, startOffset, (int32_t*)&pResult->info.capacity, &idata, dest.numOfRows); colDataDestroy(&idata); numOfRows = dest.numOfRows; @@ -1133,7 +1121,7 @@ int32_t loadDataBlockOnDemand(SExecTaskInfo* pTaskInfo, STableScanInfo* pTableSc if (setResultOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.uid, &win, masterScan, &pResult, groupId, pTableScanInfo->pCtx, pTableScanInfo->numOfOutput, pTableScanInfo->rowEntryInfoOffset) != TSDB_CODE_SUCCESS) { - longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } } } else if (pQueryAttr->stableQuery && (!pQueryAttr->tsCompQuery) && (!pQueryAttr->diffQuery)) { // stable aggregate, not interval aggregate or normal column aggregate @@ -1184,7 +1172,7 @@ int32_t loadDataBlockOnDemand(SExecTaskInfo* pTaskInfo, STableScanInfo* pTableSc if (setResultOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.uid, &win, masterScan, &pResult, groupId, pTableScanInfo->pCtx, pTableScanInfo->numOfOutput, pTableScanInfo->rowEntryInfoOffset) != TSDB_CODE_SUCCESS) { - longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } } } @@ -1476,7 +1464,7 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi if (TAOS_FAILED(code)) { releaseBufPage(pBuf, page); qError("%s ensure result data capacity failed, code %s", GET_TASKID(pTaskInfo), tstrerror(code)); - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } @@ -1488,7 +1476,7 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock); if (TAOS_FAILED(code)) { qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code)); - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) { // do nothing, todo refactor @@ -1562,7 +1550,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock); if (TAOS_FAILED(code)) { qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code)); - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) { // do nothing, todo refactor @@ -1571,16 +1559,8 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS // the _wstart needs to copy to 20 following rows, since the results of top-k expands to 20 different rows. SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, slotId); char* in = GET_ROWCELL_INTERBUF(pCtx[j].resultInfo); - if (pCtx[j].increase) { - int64_t ts = *(int64_t*)in; - for (int32_t k = 0; k < pRow->numOfRows; ++k) { - colDataAppend(pColInfoData, pBlock->info.rows + k, (const char*)&ts, pCtx[j].resultInfo->isNullRes); - ts++; - } - } else { - for (int32_t k = 0; k < pRow->numOfRows; ++k) { - colDataAppend(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes); - } + for (int32_t k = 0; k < pRow->numOfRows; ++k) { + colDataAppend(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes); } } } @@ -1717,7 +1697,7 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) { // SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; // while (tsdbNextDataBlock(pTsdbReadHandle)) { // if (isTaskKilled(pRuntimeEnv->qinfo)) { -// longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); +// T_LONG_JMP(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); // } // // tsdbRetrieveDataBlockInfo(pTsdbReadHandle, &blockInfo); @@ -1736,7 +1716,7 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) { // } // // if (terrno != TSDB_CODE_SUCCESS) { -// longjmp(pRuntimeEnv->env, terrno); +// T_LONG_JMP(pRuntimeEnv->env, terrno); // } // } @@ -1900,7 +1880,7 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) { // // // check for error // if (terrno != TSDB_CODE_SUCCESS) { -// longjmp(pRuntimeEnv->env, terrno); +// T_LONG_JMP(pRuntimeEnv->env, terrno); // } // // return true; @@ -2752,7 +2732,7 @@ static SSDataBlock* doSortedMerge(SOperatorInfo* pOperator) { int32_t code = tsortOpen(pInfo->pSortHandle); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } pOperator->status = OP_RES_TO_RETURN; @@ -2835,92 +2815,6 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan } } } -#if 0 -int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) { - uint8_t type = pOperator->operatorType; - - pOperator->status = OP_OPENED; - - if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { - SStreamScanInfo* pScanInfo = pOperator->info; - pScanInfo->blockType = STREAM_INPUT__TABLE_SCAN; - - pScanInfo->pTableScanOp->status = OP_OPENED; - - STableScanInfo* pInfo = pScanInfo->pTableScanOp->info; - ASSERT(pInfo->scanMode == TABLE_SCAN__TABLE_ORDER); - - if (uid == 0) { - pInfo->noTable = 1; - return TSDB_CODE_SUCCESS; - } - - /*if (pSnapShotScanInfo->dataReader == NULL) {*/ - /*pSnapShotScanInfo->dataReader = tsdbReaderOpen(pHandle->vnode, &pSTInfo->cond, tableList, 0, 0);*/ - /*pSnapShotScanInfo->scanMode = TABLE_SCAN__TABLE_ORDER;*/ - /*}*/ - - pInfo->noTable = 0; - - if (pInfo->lastStatus.uid != uid || pInfo->lastStatus.ts != ts) { - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - - int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList); - bool found = false; - for (int32_t i = 0; i < tableSz; i++) { - STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i); - if (pTableInfo->uid == uid) { - found = true; - pInfo->currentTable = i; - } - } - // TODO after processing drop, found can be false - ASSERT(found); - - tsdbSetTableId(pInfo->dataReader, uid); - int64_t oldSkey = pInfo->cond.twindows.skey; - pInfo->cond.twindows.skey = ts + 1; - tsdbReaderReset(pInfo->dataReader, &pInfo->cond); - pInfo->cond.twindows.skey = oldSkey; - pInfo->scanTimes = 0; - - qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid, ts, - pInfo->currentTable, tableSz); - } - - return TSDB_CODE_SUCCESS; - - } else { - if (pOperator->numOfDownstream == 1) { - return doPrepareScan(pOperator->pDownstream[0], uid, ts); - } else if (pOperator->numOfDownstream == 0) { - qError("failed to find stream scan operator to set the input data block"); - return TSDB_CODE_QRY_APP_ERROR; - } else { - qError("join not supported for stream block scan"); - return TSDB_CODE_QRY_APP_ERROR; - } - } -} - -int32_t doGetScanStatus(SOperatorInfo* pOperator, uint64_t* uid, int64_t* ts) { - int32_t type = pOperator->operatorType; - if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { - SStreamScanInfo* pScanInfo = pOperator->info; - STableScanInfo* pSnapShotScanInfo = pScanInfo->pTableScanOp->info; - *uid = pSnapShotScanInfo->lastStatus.uid; - *ts = pSnapShotScanInfo->lastStatus.ts; - } else { - if (pOperator->pDownstream[0] == NULL) { - return TSDB_CODE_INVALID_PARA; - } else { - doGetScanStatus(pOperator->pDownstream[0], uid, ts); - } - } - - return TSDB_CODE_SUCCESS; -} -#endif // this is a blocking operator static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { @@ -2947,7 +2841,7 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { int32_t code = getTableScanInfo(pOperator, &order, &scanFlag); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } // there is an scalar expression that needs to be calculated before apply the group aggregation. @@ -2955,7 +2849,7 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { SExprSupp* pSup1 = &pAggInfo->scalarExprSup; code = projectApplyFunctions(pSup1->pExprInfo, pBlock, pBlock, pSup1->pCtx, pSup1->numOfExprs, NULL); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } @@ -2964,7 +2858,7 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, true); code = doAggregateImpl(pOperator, pSup->pCtx); if (code != 0) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } @@ -3017,7 +2911,7 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len } SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info); SAggSupporter* pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo)); - int32_t size = taosHashGetSize(pSup->pResultRowHashTable); + int32_t size = tSimpleHashGetSize(pSup->pResultRowHashTable); size_t keyLen = sizeof(uint64_t) * 2; // estimate the key length int32_t totalSize = sizeof(int32_t) + sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize); @@ -3045,9 +2939,10 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len setBufPageDirty(pPage, true); releaseBufPage(pSup->pResultBuf, pPage); - void* pIter = taosHashIterate(pSup->pResultRowHashTable, NULL); - while (pIter) { - void* key = taosHashGetKey(pIter, &keyLen); + int32_t iter = 0; + void* pIter = NULL; + while ((pIter = tSimpleHashIterate(pSup->pResultRowHashTable, pIter, &iter))) { + void* key = tSimpleHashGetKey(pIter, &keyLen); SResultRowPosition* p1 = (SResultRowPosition*)pIter; pPage = (SFilePage*)getBufPage(pSup->pResultBuf, p1->pageId); @@ -3078,8 +2973,6 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len offset += sizeof(int32_t); memcpy(*result + offset, pRow, pSup->resultRowSize); offset += pSup->resultRowSize; - - pIter = taosHashIterate(pSup->pResultRowHashTable, pIter); } *(int32_t*)(*result) = offset; @@ -3107,14 +3000,14 @@ int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) { offset += sizeof(int32_t); uint64_t tableGroupId = *(uint64_t*)(result + offset); - SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, tableGroupId, pSup->resultRowSize); + SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, &pSup->currentPageId, pSup->resultRowSize); if (!resultRow) { return TSDB_CODE_TSC_INVALID_INPUT; } // add a new result set for a new group SResultRowPosition pos = {.pageId = resultRow->pageId, .offset = resultRow->offset}; - taosHashPut(pSup->pResultRowHashTable, result + offset, keyLen, &pos, sizeof(SResultRowPosition)); + tSimpleHashPut(pSup->pResultRowHashTable, result + offset, keyLen, &pos, sizeof(SResultRowPosition)); offset += keyLen; int32_t valueLen = *(int32_t*)(result + offset); @@ -3131,6 +3024,7 @@ int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) { initResultRow(resultRow); pInfo->resultRowInfo.cur = (SResultRowPosition){.pageId = resultRow->pageId, .offset = resultRow->offset}; + // releaseBufPage(pSup->pResultBuf, getBufPage(pSup->pResultBuf, pageId)); } if (offset != length) { @@ -3217,8 +3111,8 @@ int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDa } static void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t order, int32_t scanFlag); -static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo *pOperator, SFillOperatorInfo* pInfo, SResultInfo* pResultInfo, - SExecTaskInfo* pTaskInfo) { +static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo* pOperator, SFillOperatorInfo* pInfo, + SResultInfo* pResultInfo, SExecTaskInfo* pTaskInfo) { pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows; SSDataBlock* pResBlock = pInfo->pFinalRes; @@ -3230,6 +3124,7 @@ static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo *pOperator, SFillOp Q_STATUS_EQUAL(pTaskInfo->status, TASK_COMPLETED) ? pInfo->win.ekey : pInfo->existNewGroupBlock->info.window.ekey; taosResetFillInfo(pInfo->pFillInfo, getFillInfoStart(pInfo->pFillInfo)); + blockDataCleanup(pInfo->pRes); doApplyScalarCalculation(pOperator, pInfo->existNewGroupBlock, order, scanFlag); taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, ekey); @@ -3242,8 +3137,8 @@ static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo *pOperator, SFillOp pInfo->existNewGroupBlock = NULL; } -static void doHandleRemainBlockFromNewGroup(SOperatorInfo* pOperator, SFillOperatorInfo* pInfo, SResultInfo* pResultInfo, - SExecTaskInfo* pTaskInfo) { +static void doHandleRemainBlockFromNewGroup(SOperatorInfo* pOperator, SFillOperatorInfo* pInfo, + SResultInfo* pResultInfo, SExecTaskInfo* pTaskInfo) { if (taosFillHasMoreResults(pInfo->pFillInfo)) { int32_t numOfResultRows = pResultInfo->capacity - pInfo->pFinalRes->info.rows; taosFillResultDataBlock(pInfo->pFillInfo, pInfo->pFinalRes, numOfResultRows); @@ -3259,8 +3154,8 @@ static void doHandleRemainBlockFromNewGroup(SOperatorInfo* pOperator, SFillOpera static void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t order, int32_t scanFlag) { SFillOperatorInfo* pInfo = pOperator->info; - SExprSupp* pSup = &pOperator->exprSupp; - SSDataBlock* pResBlock = pInfo->pFinalRes; + SExprSupp* pSup = &pOperator->exprSupp; + SSDataBlock* pResBlock = pInfo->pFinalRes; setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, false); projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs, NULL); @@ -3270,13 +3165,13 @@ static void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlo SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, pInfo->primarySrcSlotId); colDataAssign(pDst, pSrc, pInfo->pRes->info.rows, &pResBlock->info); - for(int32_t i = 0; i < pInfo->numOfNotFillExpr; ++i) { + for (int32_t i = 0; i < pInfo->numOfNotFillExpr; ++i) { SFillColInfo* pCol = &pInfo->pFillInfo->pFillCol[i + pInfo->numOfExpr]; ASSERT(pCol->notFillCol); SExprInfo* pExpr = pCol->pExpr; - int32_t srcSlotId = pExpr->base.pParam[0].pCol->slotId; - int32_t dstSlotId = pExpr->base.resSchema.slotId; + int32_t srcSlotId = pExpr->base.pParam[0].pCol->slotId; + int32_t dstSlotId = pExpr->base.resSchema.slotId; SColumnInfoData* pDst1 = taosArrayGet(pInfo->pRes->pDataBlock, dstSlotId); SColumnInfoData* pSrc1 = taosArrayGet(pBlock->pDataBlock, srcSlotId); @@ -3292,7 +3187,6 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { SSDataBlock* pResBlock = pInfo->pFinalRes; blockDataCleanup(pResBlock); - blockDataCleanup(pInfo->pRes); int32_t order = TSDB_ORDER_ASC; int32_t scanFlag = MAIN_SCAN; @@ -3315,14 +3209,20 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { taosFillSetStartInfo(pInfo->pFillInfo, 0, pInfo->win.ekey); } else { - blockDataUpdateTsWindow(pBlock, pInfo->primaryTsCol); + blockDataUpdateTsWindow(pBlock, pInfo->primarySrcSlotId); + + blockDataCleanup(pInfo->pRes); doApplyScalarCalculation(pOperator, pBlock, order, scanFlag); if (pInfo->curGroupId == 0 || pInfo->curGroupId == pInfo->pRes->info.groupId) { pInfo->curGroupId = pInfo->pRes->info.groupId; // the first data block pInfo->totalInputRows += pInfo->pRes->info.rows; - taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, pBlock->info.window.ekey); + if (order == pInfo->pFillInfo->order) { + taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, pBlock->info.window.ekey); + } else { + taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, pBlock->info.window.skey); + } taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->pRes); } else if (pInfo->curGroupId != pBlock->info.groupId) { // the new group data block pInfo->existNewGroupBlock = pBlock; @@ -3354,7 +3254,6 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) { assert(pBlock != NULL); blockDataCleanup(pResBlock); - blockDataCleanup(pInfo->pRes); doHandleRemainBlockForNewGroupImpl(pOperator, pInfo, pResultInfo, pTaskInfo); if (pResBlock->info.rows > pResultInfo->threshold) { @@ -3416,7 +3315,7 @@ static void destroyOperatorInfo(SOperatorInfo* pOperator) { } if (pOperator->fpSet.closeFn != NULL) { - pOperator->fpSet.closeFn(pOperator->info, pOperator->exprSupp.numOfExprs); + pOperator->fpSet.closeFn(pOperator->info); } if (pOperator->pDownstream != NULL) { @@ -3449,11 +3348,13 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize, const char* pKey) { + int32_t code = 0; _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pAggSup->currentPageId = -1; pAggSup->resultRowSize = getResultRowSize(pCtx, numOfOutput); pAggSup->keyBuf = taosMemoryCalloc(1, keyBufSize + POINTER_BYTES + sizeof(int64_t)); - pAggSup->pResultRowHashTable = taosHashInit(10, hashFn, true, HASH_NO_LOCK); + pAggSup->pResultRowHashTable = tSimpleHashInit(10, hashFn); if (pAggSup->keyBuf == NULL || pAggSup->pResultRowHashTable == NULL) { return TSDB_CODE_OUT_OF_MEMORY; @@ -3464,22 +3365,23 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz); if (!osTempSpaceAvailable()) { - terrno = TSDB_CODE_NO_AVAIL_DISK; - qError("Init stream agg supporter failed since %s", terrstr(terrno)); - return terrno; + code = TSDB_CODE_NO_AVAIL_DISK; + qError("Init stream agg supporter failed since %s, %s", terrstr(code), pKey); + return code; } - int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir); + + code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir); if (code != TSDB_CODE_SUCCESS) { - qError("Create agg result buf failed since %s", tstrerror(code)); + qError("Create agg result buf failed since %s, %s", tstrerror(code), pKey); return code; } - return TSDB_CODE_SUCCESS; + return code; } void cleanupAggSup(SAggSupporter* pAggSup) { taosMemoryFreeClear(pAggSup->keyBuf); - taosHashCleanup(pAggSup->pResultRowHashTable); + tSimpleHashCleanup(pAggSup->pResultRowHashTable); destroyDiskbasedBuf(pAggSup->pResultBuf); } @@ -3496,7 +3398,7 @@ int32_t initAggInfo(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInf } for (int32_t i = 0; i < numOfCols; ++i) { - pSup->pCtx[i].pBuf = pAggSup->pResultBuf; + pSup->pCtx[i].saveHandle.pBuf = pAggSup->pResultBuf; } return TSDB_CODE_SUCCESS; @@ -3528,6 +3430,7 @@ void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) { } taosMemoryFreeClear(pCtx[i].subsidiaries.pCtx); + taosMemoryFreeClear(pCtx[i].subsidiaries.buf); taosMemoryFree(pCtx[i].input.pData); taosMemoryFree(pCtx[i].input.pColumnDataAgg); } @@ -3607,7 +3510,7 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* return pOperator; _error: - destroyAggOperatorInfo(pInfo, numOfCols); + destroyAggOperatorInfo(pInfo); taosMemoryFreeClear(pOperator); pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY; return NULL; @@ -3615,7 +3518,6 @@ _error: void cleanupBasicInfo(SOptrBasicInfo* pInfo) { assert(pInfo != NULL); - cleanupResultRowInfo(&pInfo->resultRowInfo); pInfo->pRes = blockDataDestroy(pInfo->pRes); } @@ -3633,7 +3535,7 @@ static void freeItem(void* pItem) { } } -void destroyAggOperatorInfo(void* param, int32_t numOfOutput) { +void destroyAggOperatorInfo(void* param) { SAggOperatorInfo* pInfo = (SAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); @@ -3643,7 +3545,7 @@ void destroyAggOperatorInfo(void* param, int32_t numOfOutput) { taosMemoryFreeClear(param); } -void destroyFillOperatorInfo(void* param, int32_t numOfOutput) { +void destroyFillOperatorInfo(void* param) { SFillOperatorInfo* pInfo = (SFillOperatorInfo*)param; pInfo->pFillInfo = taosDestroyFillInfo(pInfo->pFillInfo); pInfo->pRes = blockDataDestroy(pInfo->pRes); @@ -3659,12 +3561,12 @@ void destroyFillOperatorInfo(void* param, int32_t numOfOutput) { taosMemoryFreeClear(param); } -void destroyExchangeOperatorInfo(void* param, int32_t numOfOutput) { +void destroyExchangeOperatorInfo(void* param) { SExchangeInfo* pExInfo = (SExchangeInfo*)param; taosRemoveRef(exchangeObjRefPool, pExInfo->self); } -void freeSourceDataInfo(void *p) { +void freeSourceDataInfo(void* p) { SSourceDataInfo* pInfo = (SSourceDataInfo*)p; taosMemoryFreeClear(pInfo->pRsp); } @@ -3691,13 +3593,20 @@ static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t const char* id, SInterval* pInterval, int32_t fillType, int32_t order) { SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfCols, pNotFillExpr, numOfNotFillCols, pValNode); - STimeWindow w = getAlignQueryTimeWindow(pInterval, pInterval->precision, win.skey); - w = getFirstQualifiedTimeWindow(win.skey, &w, pInterval, TSDB_ORDER_ASC); + int64_t startKey = (order == TSDB_ORDER_ASC) ? win.skey : win.ekey; + STimeWindow w = getAlignQueryTimeWindow(pInterval, pInterval->precision, startKey); + w = getFirstQualifiedTimeWindow(startKey, &w, pInterval, order); - pInfo->pFillInfo = - taosCreateFillInfo(w.skey, numOfCols, numOfNotFillCols, capacity, pInterval, fillType, pColInfo, pInfo->primaryTsCol, order, id); + pInfo->pFillInfo = taosCreateFillInfo(w.skey, numOfCols, numOfNotFillCols, capacity, pInterval, fillType, pColInfo, + pInfo->primaryTsCol, order, id); - pInfo->win = win; + if (order == TSDB_ORDER_ASC) { + pInfo->win.skey = win.skey; + pInfo->win.ekey = win.ekey; + } else { + pInfo->win.skey = win.ekey; + pInfo->win.ekey = win.skey; + } pInfo->p = taosMemoryCalloc(numOfCols, POINTER_BYTES); if (pInfo->pFillInfo == NULL || pInfo->p == NULL) { @@ -3721,10 +3630,10 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* SExprInfo* pExprInfo = createExprInfo(pPhyFillNode->pFillExprs, NULL, &pInfo->numOfExpr); pInfo->pNotFillExprInfo = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &pInfo->numOfNotFillExpr); - SInterval* pInterval = + SInterval* pInterval = QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == downstream->operatorType - ? &((SMergeAlignedIntervalAggOperatorInfo*)downstream->info)->intervalAggOperatorInfo->interval - : &((SIntervalAggOperatorInfo*)downstream->info)->interval; + ? &((SMergeAlignedIntervalAggOperatorInfo*)downstream->info)->intervalAggOperatorInfo->interval + : &((SIntervalAggOperatorInfo*)downstream->info)->interval; int32_t order = (pPhyFillNode->inputTsOrder == ORDER_ASC) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC; int32_t type = convertFillType(pPhyFillNode->mode); @@ -3741,9 +3650,9 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* SArray* pColMatchColInfo = extractColMatchInfo(pPhyFillNode->pFillExprs, pPhyFillNode->node.pOutputDataBlockDesc, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID); - int32_t code = - initFillInfo(pInfo, pExprInfo, pInfo->numOfExpr, pInfo->pNotFillExprInfo, pInfo->numOfNotFillExpr, (SNodeListNode*)pPhyFillNode->pValues, - pPhyFillNode->timeRange, pResultInfo->capacity, pTaskInfo->id.str, pInterval, type, order); + int32_t code = initFillInfo(pInfo, pExprInfo, pInfo->numOfExpr, pInfo->pNotFillExprInfo, pInfo->numOfNotFillExpr, + (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange, pResultInfo->capacity, + pTaskInfo->id.str, pInterval, type, order); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -3874,9 +3783,9 @@ static void cleanupTableSchemaInfo(SSchemaInfo* pSchemaInfo) { tDeleteSSchemaWrapper(pSchemaInfo->qsw); } -static int32_t sortTableGroup(STableListInfo* pTableListInfo, int32_t groupNum) { +static int32_t sortTableGroup(STableListInfo* pTableListInfo) { taosArrayClear(pTableListInfo->pGroupList); - SArray* sortSupport = taosArrayInit(groupNum, sizeof(uint64_t)); + SArray* sortSupport = taosArrayInit(16, sizeof(uint64_t)); if (sortSupport == NULL) return TSDB_CODE_OUT_OF_MEMORY; for (int32_t i = 0; i < taosArrayGetSize(pTableListInfo->pTableList); i++) { STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i); @@ -3954,48 +3863,26 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, if (pTableListInfo->map == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } - int32_t keyLen = 0; - void* keyBuf = NULL; - - SNode* node; - FOREACH(node, group) { - SExprNode* pExpr = (SExprNode*)node; - keyLen += pExpr->resType.bytes; - } - - int32_t nullFlagSize = sizeof(int8_t) * LIST_LENGTH(group); - keyLen += nullFlagSize; - - keyBuf = taosMemoryCalloc(1, keyLen); - if (keyBuf == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; - } bool assignUid = groupbyTbname(group); - int32_t groupNum = 0; - size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList); - - for (int32_t i = 0; i < numOfTables; i++) { - STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i); + size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList); - if (assignUid) { + if (assignUid) { + for (int32_t i = 0; i < numOfTables; i++) { + STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i); info->groupId = info->uid; - } else { - int32_t code = getGroupIdFromTagsVal(pHandle->meta, info->uid, group, keyBuf, &info->groupId); - if (code != TSDB_CODE_SUCCESS) { - return code; - } + taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &info->groupId, sizeof(uint64_t)); + } + } else { + int32_t code = getColInfoResultForGroupby(pHandle->meta, group, pTableListInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; } - - taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &info->groupId, sizeof(uint64_t)); - groupNum++; } - taosMemoryFree(keyBuf); - if (pTableListInfo->needSortTableByGroupId) { - return sortTableGroup(pTableListInfo, groupNum); + return sortTableGroup(pTableListInfo); } return TDB_CODE_SUCCESS; @@ -4021,6 +3908,7 @@ static int32_t initTableblockDistQueryCond(uint64_t uid, SQueryTableDataCond* pC pCond->type = TIMEWINDOW_RANGE_CONTAINED; pCond->startVersion = -1; pCond->endVersion = -1; + pCond->schemaVersion = -1; return TSDB_CODE_SUCCESS; } @@ -4040,6 +3928,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo pTableListInfo, pTagCond, pTagIndexCond, GET_TASKID(pTaskInfo)); if (code) { pTaskInfo->code = code; + qError("failed to createScanTableListInfo, code:%s, %s", tstrerror(code), GET_TASKID(pTaskInfo)); return NULL; } @@ -4059,6 +3948,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo pTableListInfo, pTagCond, pTagIndexCond, GET_TASKID(pTaskInfo)); if (code) { pTaskInfo->code = code; + qError("failed to createScanTableListInfo, code: %s", tstrerror(code)); return NULL; } @@ -4082,6 +3972,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo pHandle, pTableListInfo, pTagCond, pTagIndexCond, GET_TASKID(pTaskInfo)); if (code) { pTaskInfo->code = code; + qError("failed to createScanTableListInfo, code: %s", tstrerror(code)); return NULL; } @@ -4104,6 +3995,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanPhyNode, pTagCond, pTagIndexCond, pTableListInfo); if (code != TSDB_CODE_SUCCESS) { pTaskInfo->code = terrno; + qError("failed to getTableList, code: %s", tstrerror(code)); return NULL; } @@ -4150,7 +4042,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo return NULL; } - pOperator = createLastrowScanOperator(pScanNode, pHandle, pTaskInfo); + pOperator = createCacherowsScanOperator(pScanNode, pHandle, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_PROJECT == type) { pOperator = createProjectOperatorInfo(NULL, (SProjectPhysiNode*)pPhyNode, pTaskInfo); } else { @@ -4174,9 +4066,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo if (ops[i] == NULL) { taosMemoryFree(ops); return NULL; - } else { - ops[i]->resultDataBlockId = pChildNode->pOutputDataBlockDesc->dataBlockId; } + + ops[i]->resultDataBlockId = pChildNode->pOutputDataBlockDesc->dataBlockId; } SOperatorInfo* pOptr = NULL; @@ -4228,37 +4120,10 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == type) { SMergeAlignedIntervalPhysiNode* pIntervalPhyNode = (SMergeAlignedIntervalPhysiNode*)pPhyNode; - - SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &num); - SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc); - - SInterval interval = {.interval = pIntervalPhyNode->interval, - .sliding = pIntervalPhyNode->sliding, - .intervalUnit = pIntervalPhyNode->intervalUnit, - .slidingUnit = pIntervalPhyNode->slidingUnit, - .offset = pIntervalPhyNode->offset, - .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision}; - - int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId; - pOptr = createMergeAlignedIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, - pPhyNode->pConditions, pIntervalPhyNode->window.mergeDataBlock, - pTaskInfo); + pOptr = createMergeAlignedIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL == type) { SMergeIntervalPhysiNode* pIntervalPhyNode = (SMergeIntervalPhysiNode*)pPhyNode; - - SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &num); - SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc); - - SInterval interval = {.interval = pIntervalPhyNode->interval, - .sliding = pIntervalPhyNode->sliding, - .intervalUnit = pIntervalPhyNode->intervalUnit, - .slidingUnit = pIntervalPhyNode->slidingUnit, - .offset = pIntervalPhyNode->offset, - .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision}; - - int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId; - pOptr = createMergeIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, - pIntervalPhyNode->window.mergeDataBlock, pTaskInfo); + pOptr = createMergeIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL == type) { int32_t children = 0; pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children); @@ -4287,17 +4152,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo pOptr = createPartitionOperatorInfo(ops[0], (SPartitionPhysiNode*)pPhyNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE == type) { SStateWinodwPhysiNode* pStateNode = (SStateWinodwPhysiNode*)pPhyNode; - - STimeWindowAggSupp as = {.waterMark = pStateNode->window.watermark, .calTrigger = pStateNode->window.triggerType}; - - SExprInfo* pExprInfo = createExprInfo(pStateNode->window.pFuncs, NULL, &num); - SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc); - int32_t tsSlotId = ((SColumnNode*)pStateNode->window.pTspk)->slotId; - - SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey)->pExpr; - SColumn col = extractColumnFromColumnNode(pColNode); - pOptr = createStatewindowOperatorInfo(ops[0], pExprInfo, num, pResBlock, &as, tsSlotId, &col, pPhyNode->pConditions, - pTaskInfo); + pOptr = createStatewindowOperatorInfo(ops[0], pStateNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE == type) { pOptr = createStreamStateAggOperatorInfo(ops[0], pPhyNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN == type) { @@ -4311,8 +4166,12 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo } else { ASSERT(0); } + taosMemoryFree(ops); - if (pOptr) pOptr->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId; + if (pOptr) { + pOptr->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId; + } + return pOptr; } @@ -4349,42 +4208,6 @@ SArray* extractColumnInfo(SNodeList* pNodeList) { return pList; } -#if 0 -STsdbReader* doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, - STableListInfo* pTableListInfo, const char* idstr) { - int32_t code = getTableList(pHandle->meta, pHandle->vnode, &pTableScanNode->scan, pTableListInfo); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - if (taosArrayGetSize(pTableListInfo->pTableList) == 0) { - code = 0; - qDebug("no table qualified for query, %s", idstr); - goto _error; - } - - SQueryTableDataCond cond = {0}; - code = initQueryTableDataCond(&cond, pTableScanNode); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - STsdbReader* pReader; - code = tsdbReaderOpen(pHandle->vnode, &cond, pTableListInfo->pTableList, &pReader, idstr); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - cleanupQueryTableDataCond(&cond); - - return pReader; - -_error: - terrno = code; - return NULL; -} -#endif - static int32_t extractTbscanInStreamOpTree(SOperatorInfo* pOperator, STableScanInfo** ppInfo) { if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { if (pOperator->numOfDownstream == 0) { @@ -4602,6 +4425,10 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead goto _complete; } + if (pHandle && pHandle->pStateBackend) { + (*pTaskInfo)->streamInfo.pState = pHandle->pStateBackend; + } + (*pTaskInfo)->sql = sql; sql = NULL; (*pTaskInfo)->pSubplan = pPlan; @@ -4654,27 +4481,6 @@ void doDestroyTask(SExecTaskInfo* pTaskInfo) { taosMemoryFreeClear(pTaskInfo); } -static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes) { - if (val == NULL) { - setNull(output, type, bytes); - return; - } - - if (IS_VAR_DATA_TYPE(type)) { - // Binary data overflows for sort of unknown reasons. Let trim the overflow data - if (varDataTLen(val) > bytes) { - int32_t maxLen = bytes - VARSTR_HEADER_SIZE; - int32_t len = (varDataLen(val) > maxLen) ? maxLen : varDataLen(val); - memcpy(varDataVal(output), varDataVal(val), len); - varDataSetLen(output, len); - } else { - varDataCopy(output, val); - } - } else { - memcpy(output, val, bytes); - } -} - static int64_t getQuerySupportBufSize(size_t numOfTables) { size_t s1 = sizeof(STableQueryInfo); // size_t s3 = sizeof(STableCheckInfo); buffer consumption in tsdb @@ -4747,6 +4553,7 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInf int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t size) { + pSup->currentPageId = -1; pSup->resultRowSize = getResultRowSize(pCtx, numOfOutput); pSup->keySize = sizeof(int64_t) + sizeof(TSKEY); pSup->pKeyBuf = taosMemoryCalloc(1, pSup->keySize); @@ -4774,7 +4581,8 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlF } int32_t code = createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, tsTempDir); for (int32_t i = 0; i < numOfOutput; ++i) { - pCtx[i].pBuf = pSup->pResultBuf; + pCtx[i].saveHandle.pBuf = pSup->pResultBuf; } + return code; } diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 507719e0aac3d8a8224828433cc5c66445dea0c1..5d123f723e01d98faa791e612f95235ffef5a04d 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -36,8 +36,12 @@ static void freeGroupKey(void* param) { taosMemoryFree(pKey->pData); } -static void destroyGroupOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyGroupOperatorInfo(void* param) { SGroupbyOperatorInfo* pInfo = (SGroupbyOperatorInfo*)param; + if (pInfo == NULL) { + return; + } + cleanupBasicInfo(&pInfo->binfo); taosMemoryFreeClear(pInfo->keyBuf); taosArrayDestroy(pInfo->pGroupCols); @@ -247,7 +251,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) { if (!pInfo->isInit) { recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j); if (terrno != TSDB_CODE_SUCCESS) { // group by json error - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } pInfo->isInit = true; num++; @@ -265,7 +269,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) { num++; recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j); if (terrno != TSDB_CODE_SUCCESS) { // group by json error - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } continue; } @@ -273,11 +277,11 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) { len = buildGroupKeys(pInfo->keyBuf, pInfo->pGroupColVals); int32_t ret = setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf, len, pBlock->info.groupId, pInfo->aggSup.pResultBuf, &pInfo->aggSup); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code - longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); } int32_t rowIndex = j - num; - doApplyFunctions(pTaskInfo, pCtx, &w, NULL, rowIndex, num, NULL, pBlock->info.rows, pOperator->exprSupp.numOfExprs, TSDB_ORDER_ASC); + doApplyFunctions(pTaskInfo, pCtx, NULL, rowIndex, num, pBlock->info.rows, pOperator->exprSupp.numOfExprs); // assign the group keys or user input constant values if required doAssignGroupKeys(pCtx, pOperator->exprSupp.numOfExprs, pBlock->info.rows, rowIndex); @@ -291,11 +295,11 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) { setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf, len, pBlock->info.groupId, pInfo->aggSup.pResultBuf, &pInfo->aggSup); if (ret != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); } int32_t rowIndex = pBlock->info.rows - num; - doApplyFunctions(pTaskInfo, pCtx, &w, NULL, rowIndex, num, NULL, pBlock->info.rows, pOperator->exprSupp.numOfExprs, TSDB_ORDER_ASC); + doApplyFunctions(pTaskInfo, pCtx, NULL, rowIndex, num, pBlock->info.rows, pOperator->exprSupp.numOfExprs); doAssignGroupKeys(pCtx, pOperator->exprSupp.numOfExprs, pBlock->info.rows, rowIndex); } } @@ -350,7 +354,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) { int32_t code = getTableScanInfo(pOperator, &order, &scanFlag); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } // the pDataBlock are always the same one, no need to call this again @@ -360,7 +364,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) { if (pInfo->scalarSup.pExprInfo != NULL) { pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx, pInfo->scalarSup.numOfExprs, NULL); if (pTaskInfo->code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, pTaskInfo->code); + T_LONG_JMP(pTaskInfo->env, pTaskInfo->code); } } @@ -413,7 +417,11 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx } initResultSizeInfo(&pOperator->resultInfo, 4096); - initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, pInfo->groupKeyLen, pTaskInfo->id.str); + code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, pInfo->groupKeyLen, pTaskInfo->id.str); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + initBasicInfo(&pInfo->binfo, pResultBlock); initResultRowInfo(&pInfo->binfo.resultRowInfo); @@ -426,11 +434,15 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, hashGroupbyAggregate, NULL, NULL, destroyGroupOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL); code = appendDownstream(pOperator, &downstream, 1); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + return pOperator; _error: pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY; - taosMemoryFreeClear(pInfo); + destroyGroupOperatorInfo(pInfo); taosMemoryFreeClear(pOperator); return NULL; } @@ -535,7 +547,7 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf p = taosHashGet(pInfo->pGroupSet, pInfo->keyBuf, len); int32_t pageId = 0; - pPage = getNewBufPage(pInfo->pBuf, 0, &pageId); + pPage = getNewBufPage(pInfo->pBuf, &pageId); taosArrayPush(p->pPageList, &pageId); *(int32_t *) pPage = 0; @@ -550,7 +562,7 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf // add a new page for current group int32_t pageId = 0; - pPage = getNewBufPage(pInfo->pBuf, 0, &pageId); + pPage = getNewBufPage(pInfo->pBuf, &pageId); taosArrayPush(p->pPageList, &pageId); memset(pPage, 0, getBufPageSize(pInfo->pBuf)); } @@ -678,20 +690,20 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) { if (pInfo->scalarSup.pExprInfo != NULL) { pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx, pInfo->scalarSup.numOfExprs, NULL); if (pTaskInfo->code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, pTaskInfo->code); + T_LONG_JMP(pTaskInfo->env, pTaskInfo->code); } } terrno = TSDB_CODE_SUCCESS; doHashPartition(pOperator, pBlock); if (terrno != TSDB_CODE_SUCCESS) { // group by json error - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } } SArray* groupArray = taosArrayInit(taosHashGetSize(pInfo->pGroupSet), sizeof(SDataGroupInfo)); - void* pGroupIter = NULL; - pGroupIter = taosHashIterate(pInfo->pGroupSet, NULL); + + void* pGroupIter = taosHashIterate(pInfo->pGroupSet, NULL); while (pGroupIter != NULL) { SDataGroupInfo* pGroupInfo = pGroupIter; taosArrayPush(groupArray, pGroupInfo); @@ -710,7 +722,7 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) { return buildPartitionResult(pOperator); } -static void destroyPartitionOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyPartitionOperatorInfo(void* param) { SPartitionOperatorInfo* pInfo = (SPartitionOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); taosArrayDestroy(pInfo->pGroupCols); diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c index 7d2b84d0f053a7c8c6e3f63db719f67b3d9e99f3..1bc7d458e0ee16decabea988a16713996d2468ce 100644 --- a/source/libs/executor/src/joinoperator.c +++ b/source/libs/executor/src/joinoperator.c @@ -25,7 +25,7 @@ static void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode); static SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator); -static void destroyMergeJoinOperator(void* param, int32_t numOfOutput); +static void destroyMergeJoinOperator(void* param); static void extractTimeCondition(SJoinOperatorInfo* pInfo, SOperatorInfo** pDownstream, int32_t numOfDownstream, SSortMergeJoinPhysiNode* pJoinNode); @@ -128,12 +128,11 @@ void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode) { pColumn->scale = pColumnNode->node.resType.scale; } -void destroyMergeJoinOperator(void* param, int32_t numOfOutput) { +void destroyMergeJoinOperator(void* param) { SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*)param; nodesDestroyNode(pJoinOperator->pCondAfterMerge); pJoinOperator->pRes = blockDataDestroy(pJoinOperator->pRes); - taosMemoryFreeClear(param); } diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index f2b79bf703343a8365b832b23dcd514e5ecc0574..2f12a0d19bdf74e7b0b2ab94c373a31cbe7d8316 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -23,7 +23,7 @@ static SArray* setRowTsColumnOutputInfo(SqlFunctionCtx* pCtx, int32_t numOf static void setFunctionResultOutput(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t stage, int32_t numOfExprs); -static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyProjectOperatorInfo(void* param) { if (NULL == param) { return; } @@ -37,10 +37,13 @@ static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput) { taosMemoryFreeClear(param); } -static void destroyIndefinitOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyIndefinitOperatorInfo(void* param) { SIndefOperatorInfo* pInfo = (SIndefOperatorInfo*)param; - cleanupBasicInfo(&pInfo->binfo); + if (pInfo == NULL) { + return; + } + cleanupBasicInfo(&pInfo->binfo); taosArrayDestroy(pInfo->pPseudoColInfo); cleanupAggSup(&pInfo->aggSup); cleanupExprSupp(&pInfo->scalarSup); @@ -50,9 +53,11 @@ static void destroyIndefinitOperatorInfo(void* param, int32_t numOfOutput) { SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode, SExecTaskInfo* pTaskInfo) { + int32_t code = TSDB_CODE_SUCCESS; SProjectOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SProjectOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; goto _error; } @@ -67,12 +72,11 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys pInfo->binfo.pRes = pResBlock; pInfo->pFinalRes = createOneDataBlock(pResBlock, false); pInfo->pFilterNode = pProjPhyNode->node.pConditions; - pInfo->mergeDataBlocks = pProjPhyNode->mergeDataBlock; - - // todo remove it soon if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) { pInfo->mergeDataBlocks = false; + } else { + pInfo->mergeDataBlocks = pProjPhyNode->mergeDataBlock; } int32_t numOfRows = 4096; @@ -83,9 +87,13 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys if (numOfRows * pResBlock->info.rowSize > TWOMB) { numOfRows = TWOMB / pResBlock->info.rowSize; } + initResultSizeInfo(&pOperator->resultInfo, numOfRows); + code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } - initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str); initBasicInfo(&pInfo->binfo, pResBlock); setFunctionResultOutput(pOperator, &pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, numOfCols); @@ -99,7 +107,7 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doProjectOperation, NULL, NULL, destroyProjectOperatorInfo, NULL, NULL, NULL); - int32_t code = appendDownstream(pOperator, &downstream, 1); + code = appendDownstream(pOperator, &downstream, 1); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -107,7 +115,9 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys return pOperator; _error: - pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY; + destroyProjectOperatorInfo(pInfo); + taosMemoryFree(pOperator); + pTaskInfo->code = code; return NULL; } @@ -175,7 +185,8 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows); blockDataKeepFirstNRows(pBlock, keepRows); //TODO: optimize it later when partition by + limit - if ((pLimitInfo->slimit.limit == -1 && pLimitInfo->currentGroupId == 0) || pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups) { + if ((pLimitInfo->slimit.limit == -1 && pLimitInfo->currentGroupId == 0) || + (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) { doSetOperatorCompleted(pOperator); } } @@ -184,16 +195,6 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS return PROJECT_RETRIEVE_DONE; } -void printDataBlock1(SSDataBlock* pBlock, const char* flag) { - if (!pBlock || pBlock->info.rows == 0) { - qDebug("===stream===printDataBlock: Block is Null or Empty"); - return; - } - char* pBuf = NULL; - qDebug("%s", dumpBlockData(pBlock, flag, &pBuf)); - taosMemoryFreeClear(pBuf); -} - SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { SProjectOperatorInfo* pProjectInfo = pOperator->info; SOptrBasicInfo* pInfo = &pProjectInfo->binfo; @@ -260,7 +261,7 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { // the pDataBlock are always the same one, no need to call this again int32_t code = getTableScanInfo(downstream, &order, &scanFlag); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, false); @@ -269,7 +270,7 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { code = projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs, pProjectInfo->pPseudoColInfo); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } status = doIngroupLimitOffset(pLimitInfo, pBlock->info.groupId, pInfo->pRes, pOperator); @@ -363,9 +364,12 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy initResultSizeInfo(&pOperator->resultInfo, numOfRows); - initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfExpr, keyBufSize, pTaskInfo->id.str); - initBasicInfo(&pInfo->binfo, pResBlock); + int32_t code = initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfExpr, keyBufSize, pTaskInfo->id.str); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + initBasicInfo(&pInfo->binfo, pResBlock); setFunctionResultOutput(pOperator, &pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, numOfExpr); pInfo->binfo.pRes = pResBlock; @@ -381,7 +385,7 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doApplyIndefinitFunction, NULL, NULL, destroyIndefinitOperatorInfo, NULL, NULL, NULL); - int32_t code = appendDownstream(pOperator, &downstream, 1); + code = appendDownstream(pOperator, &downstream, 1); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -389,7 +393,7 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy return pOperator; _error: - taosMemoryFree(pInfo); + destroyIndefinitOperatorInfo(pInfo); taosMemoryFree(pOperator); pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY; return NULL; @@ -407,7 +411,7 @@ static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOp // the pDataBlock are always the same one, no need to call this again int32_t code = getTableScanInfo(downstream, &order, &scanFlag); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } // there is an scalar expression that needs to be calculated before apply the group aggregation. @@ -416,7 +420,7 @@ static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOp code = projectApplyFunctions(pScalarSup->pExprInfo, pBlock, pBlock, pScalarSup->pCtx, pScalarSup->numOfExprs, pIndefInfo->pPseudoColInfo); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } @@ -426,7 +430,7 @@ static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOp code = projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs, pIndefInfo->pPseudoColInfo); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index d8de8df163207b89120c03dc3b8c42dcff3776d9..de6768b83ab56b6c1302b1af6de4c7696463ba0c 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -13,11 +13,11 @@ * along with this program. If not, see . */ -#include "os.h" #include "executorimpl.h" #include "filter.h" #include "function.h" #include "functionMgt.h" +#include "os.h" #include "querynodes.h" #include "systable.h" #include "tname.h" @@ -36,8 +36,8 @@ #define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC)) static int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity); -static int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, - const char* dbName); +static int32_t buildDbTableInfoBlock(bool sysInfo, const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, + size_t size, const char* dbName); static bool processBlockWithProbability(const SSampleExecInfo* pInfo); @@ -128,7 +128,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn w = getAlignQueryTimeWindow(pInterval, pInterval->precision, pBlockInfo->window.skey); assert(w.ekey >= pBlockInfo->window.skey); - if (w.ekey < pBlockInfo->window.ekey) { + if (TMAX(w.skey, pBlockInfo->window.skey) <= TMIN(w.ekey, pBlockInfo->window.ekey)) { return true; } @@ -139,7 +139,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn } assert(w.ekey > pBlockInfo->window.ekey); - if (w.skey <= pBlockInfo->window.ekey && w.skey > pBlockInfo->window.skey) { + if (TMAX(w.skey, pBlockInfo->window.skey) <= pBlockInfo->window.ekey) { return true; } } @@ -147,7 +147,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn w = getAlignQueryTimeWindow(pInterval, pInterval->precision, pBlockInfo->window.ekey); assert(w.skey <= pBlockInfo->window.ekey); - if (w.skey > pBlockInfo->window.skey) { + if (TMAX(w.skey, pBlockInfo->window.skey) <= TMIN(w.ekey, pBlockInfo->window.ekey)) { return true; } @@ -158,7 +158,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn } assert(w.skey < pBlockInfo->window.skey); - if (w.ekey < pBlockInfo->window.ekey && w.ekey >= pBlockInfo->window.skey) { + if (pBlockInfo->window.skey <= TMIN(w.ekey, pBlockInfo->window.ekey)) { return true; } } @@ -178,8 +178,8 @@ static SResultRow* getTableGroupOutputBuf(SOperatorInfo* pOperator, uint64_t gro STableScanInfo* pTableScanInfo = pOperator->info; - SResultRowPosition* p1 = - (SResultRowPosition*)taosHashGet(pTableScanInfo->pdInfo.pAggSup->pResultRowHashTable, buf, GET_RES_WINDOW_KEY_LEN(sizeof(groupId))); + SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(pTableScanInfo->pdInfo.pAggSup->pResultRowHashTable, buf, + GET_RES_WINDOW_KEY_LEN(sizeof(groupId))); if (p1 == NULL) { return NULL; @@ -238,7 +238,7 @@ static FORCE_INLINE bool doFilterByBlockSMA(const SNode* pFilterNode, SColumnDat // todo move to the initialization function int32_t code = filterInitFromNode((SNode*)pFilterNode, &filter, 0); - bool keep = filterRangeExecute(filter, pColsAgg, numOfCols, numOfRows); + bool keep = filterRangeExecute(filter, pColsAgg, numOfCols, numOfRows); filterFreeInfo(filter); return keep; @@ -250,7 +250,7 @@ static bool doLoadBlockSMA(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, int32_t code = tsdbRetrieveDatablockSMA(pTableScanInfo->dataReader, &pColAgg, &allColumnsHaveAgg); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } if (!allColumnsHaveAgg) { @@ -264,7 +264,7 @@ static bool doLoadBlockSMA(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, if (pBlock->pBlockAgg == NULL) { pBlock->pBlockAgg = taosMemoryCalloc(numOfCols, POINTER_BYTES); if (pBlock->pBlockAgg == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY); } } @@ -312,9 +312,9 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca return TSDB_CODE_SUCCESS; } else if (*status == FUNC_DATA_REQUIRED_STATIS_LOAD) { pCost->loadBlockStatis += 1; - loadSMA = true; // mark the operation of load sma; + loadSMA = true; // mark the operation of load sma; bool success = doLoadBlockSMA(pTableScanInfo, pBlock, pTaskInfo); - if (success) { // failed to load the block sma data, data block statistics does not exist, load data block instead + if (success) { // failed to load the block sma data, data block statistics does not exist, load data block instead qDebug("%s data block SMA loaded, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); return TSDB_CODE_SUCCESS; @@ -353,6 +353,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); pCost->skipBlocks += 1; + *status = FUNC_DATA_REQUIRED_FILTEROUT; return TSDB_CODE_SUCCESS; } @@ -373,7 +374,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca int32_t code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pBlock, GET_TASKID(pTaskInfo)); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } @@ -439,7 +440,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int } else { // these are tags STagVal tagVal = {0}; tagVal.cid = pExpr->base.pParam[0].pCol->colId; - const char* p = metaGetTableTagVal(&mr.me, pColInfoData->info.type, &tagVal); + const char* p = metaGetTableTagVal(mr.me.ctbEntry.pTags, pColInfoData->info.type, &tagVal); char* data = NULL; if (pColInfoData->info.type != TSDB_DATA_TYPE_JSON && p != NULL) { @@ -453,7 +454,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int colDataAppendNNULL(pColInfoData, 0, pBlock->info.rows); } else if (pColInfoData->info.type != TSDB_DATA_TYPE_JSON) { colDataAppendNItems(pColInfoData, 0, data, pBlock->info.rows); - } else { // todo opt for json tag + } else { // todo opt for json tag for (int32_t i = 0; i < pBlock->info.rows; ++i) { colDataAppend(pColInfoData, i, data, false); } @@ -494,7 +495,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { while (tsdbNextDataBlock(pTableScanInfo->dataReader)) { if (isTaskKilled(pTaskInfo)) { - longjmp(pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED); } // process this data block based on the probabilities @@ -522,7 +523,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { int32_t code = loadDataBlock(pOperator, pTableScanInfo, pBlock, &status); // int32_t code = loadDataBlockOnDemand(pOperator->pRuntimeEnv, pTableScanInfo, pBlock, &status); if (code != TSDB_CODE_SUCCESS) { - longjmp(pOperator->pTaskInfo->env, code); + T_LONG_JMP(pOperator->pTaskInfo->env, code); } // current block is filter out according to filter condition, continue load the next block @@ -570,7 +571,10 @@ static SSDataBlock* doTableScanGroup(SOperatorInfo* pOperator) { if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED); pTableScanInfo->scanFlag = REPEAT_SCAN; - qDebug("%s start to repeat ascending order scan data SELECT last_row(*),hostname from cpu group by hostname;blocks due to query func required", GET_TASKID(pTaskInfo)); + qDebug( + "%s start to repeat ascending order scan data SELECT last_row(*),hostname from cpu group by hostname;blocks " + "due to query func required", + GET_TASKID(pTaskInfo)); // do prepare for the next round table scan operation tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond); @@ -645,7 +649,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { int32_t code = tsdbReaderOpen(pInfo->readHandle.vnode, &pInfo->cond, tableList, (STsdbReader**)&pInfo->dataReader, GET_TASKID(pTaskInfo)); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); return NULL; } } @@ -685,7 +689,7 @@ static int32_t getTableScannerExecInfo(struct SOperatorInfo* pOptr, void** pOptr return 0; } -static void destroyTableScanOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyTableScanOperatorInfo(void* param) { STableScanInfo* pTableScanInfo = (STableScanInfo*)param; blockDataDestroy(pTableScanInfo->pResBlock); cleanupQueryTableDataCond(&pTableScanInfo->cond); @@ -833,7 +837,7 @@ static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) { int32_t code = doGetTableRowSize(pBlockScanInfo->readHandle.meta, pBlockScanInfo->uid, &blockDistInfo.rowSize, GET_TASKID(pTaskInfo)); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } tsdbGetFileBlocksDistInfo(pBlockScanInfo->pHandle, &blockDistInfo); @@ -859,7 +863,7 @@ static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) { return pBlock; } -static void destroyBlockDistScanOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyBlockDistScanOperatorInfo(void* param) { SBlockDistInfo* pDistInfo = (SBlockDistInfo*)param; blockDataDestroy(pDistInfo->pResBlock); tsdbReaderClose(pDistInfo->pHandle); @@ -1082,7 +1086,10 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr SColumnInfoData* pDestStartCol = taosArrayGet(pDestBlock->pDataBlock, START_TS_COLUMN_INDEX); SColumnInfoData* pDestEndCol = taosArrayGet(pDestBlock->pDataBlock, END_TS_COLUMN_INDEX); + SColumnInfoData* pDestUidCol = taosArrayGet(pDestBlock->pDataBlock, UID_COLUMN_INDEX); SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX); + SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX); + SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX); int32_t dummy = 0; for (int32_t i = 0; i < pSrcBlock->info.rows; i++) { uint64_t groupId = getGroupId(pInfo->pTableScanOp, uidCol[i]); @@ -1096,9 +1103,13 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr SResultWindowInfo* pEndWin = getCurSessionWindow(pInfo->sessionSup.pStreamAggSup, endData[i], endData[i], groupId, 0, &dummy); ASSERT(pEndWin); + TSKEY ts = INT64_MIN; colDataAppend(pDestStartCol, i, (const char*)&pStartWin->win.skey, false); colDataAppend(pDestEndCol, i, (const char*)&pEndWin->win.ekey, false); + colDataAppendNULL(pDestUidCol, i); colDataAppend(pDestGpCol, i, (const char*)&groupId, false); + colDataAppendNULL(pDestCalStartTsCol, i); + colDataAppendNULL(pDestCalEndTsCol, i); pDestBlock->info.rows++; } return TSDB_CODE_SUCCESS; @@ -1153,13 +1164,13 @@ static int32_t generateScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, return code; } -void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid) { +void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, int32_t uidCol, uint64_t* pID) { SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX); SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX); - SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX); + SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, uidCol); colDataAppend(pStartTsCol, pBlock->info.rows, (const char*)pStartTs, false); colDataAppend(pEndTsCol, pBlock->info.rows, (const char*)pEndTs, false); - colDataAppend(pUidCol, pBlock->info.rows, (const char*)pUid, false); + colDataAppend(pUidCol, pBlock->info.rows, (const char*)pID, false); pBlock->info.rows++; } @@ -1171,20 +1182,22 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex); ASSERT(pColDataInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP); TSKEY* tsCol = (TSKEY*)pColDataInfo->pData; + bool tableInserted = updateInfoIsTableInserted(pInfo->pUpdateInfo, pBlock->info.uid); for (int32_t rowId = 0; rowId < pBlock->info.rows; rowId++) { SResultRowInfo dumyInfo; dumyInfo.cur.pageId = -1; - bool isClosed = false; + bool isClosed = false; STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX}; - if (isOverdue(tsCol[rowId], &pInfo->twAggSup)) { + if (tableInserted && isOverdue(tsCol[rowId], &pInfo->twAggSup)) { win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[rowId], &pInfo->interval, TSDB_ORDER_ASC); isClosed = isCloseWindow(&win, &pInfo->twAggSup); } // must check update info first. bool update = updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, tsCol[rowId]); - if ((update || (isSignleIntervalWindow(pInfo) && isClosed && - isDeletedWindow(&win, pBlock->info.groupId, pInfo->sessionSup.pIntervalAggSup))) && out) { - appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, &pBlock->info.uid); + bool closedWin = isClosed && isSignleIntervalWindow(pInfo) && + isDeletedWindow(&win, pBlock->info.groupId, pInfo->sessionSup.pIntervalAggSup); + if ((update || closedWin) && out) { + appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, UID_COLUMN_INDEX, &pBlock->info.uid); } } if (out) { @@ -1206,7 +1219,7 @@ static void setBlockGroupId(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32 static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock) { SDataBlockInfo* pBlockInfo = &pInfo->pRes->info; SOperatorInfo* pOperator = pInfo->pStreamScanOp; - SExecTaskInfo* pTaskInfo = pInfo->pStreamScanOp->pTaskInfo; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; blockDataEnsureCapacity(pInfo->pRes, pBlock->info.rows); @@ -1215,7 +1228,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock pInfo->pRes->info.type = STREAM_NORMAL; pInfo->pRes->info.version = pBlock->info.version; - uint64_t* groupIdPre = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t)); + uint64_t* groupIdPre = taosHashGet(pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t)); if (groupIdPre) { pInfo->pRes->info.groupId = *groupIdPre; } else { @@ -1253,7 +1266,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock GET_TASKID(pTaskInfo)); if (code != TSDB_CODE_SUCCESS) { blockDataFreeRes((SSDataBlock*)pBlock); - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } @@ -1263,12 +1276,29 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock return 0; } -static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { - // NOTE: this operator does never check if current status is done or not +static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStreamScanInfo* pInfo = pOperator->info; - qDebug("stream scan called"); + qDebug("queue scan called"); + if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) { + SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp); + if (pResult && pResult->info.rows > 0) { + qDebug("queue scan tsdb return %d rows", pResult->info.rows); + return pResult; + } else { + STableScanInfo* pTSInfo = pInfo->pTableScanOp->info; + tsdbReaderClose(pTSInfo->dataReader); + pTSInfo->dataReader = NULL; + tqOffsetResetToLog(&pTaskInfo->streamInfo.prepareStatus, pTaskInfo->streamInfo.snapshotVer); + qDebug("queue scan tsdb over, switch to wal ver %d", pTaskInfo->streamInfo.snapshotVer + 1); + if (tqSeekVer(pInfo->tqReader, pTaskInfo->streamInfo.snapshotVer + 1) < 0) { + return NULL; + } + ASSERT(pInfo->tqReader->pWalReader->curVersion == pTaskInfo->streamInfo.snapshotVer + 1); + } + } + if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) { while (1) { SFetchRet ret = {0}; @@ -1280,21 +1310,21 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { } // TODO clean data block if (pInfo->pRes->info.rows > 0) { - qDebug("stream scan log return %d rows", pInfo->pRes->info.rows); + qDebug("queue scan log return %d rows", pInfo->pRes->info.rows); return pInfo->pRes; } } else if (ret.fetchType == FETCH_TYPE__META) { ASSERT(0); - pTaskInfo->streamInfo.lastStatus = ret.offset; - pTaskInfo->streamInfo.metaBlk = ret.meta; - return NULL; + // pTaskInfo->streamInfo.lastStatus = ret.offset; + // pTaskInfo->streamInfo.metaBlk = ret.meta; + // return NULL; } else if (ret.fetchType == FETCH_TYPE__NONE) { pTaskInfo->streamInfo.lastStatus = ret.offset; ASSERT(pTaskInfo->streamInfo.lastStatus.version >= pTaskInfo->streamInfo.prepareStatus.version); ASSERT(pTaskInfo->streamInfo.lastStatus.version + 1 == pInfo->tqReader->pWalReader->curVersion); char formatBuf[80]; tFormatOffset(formatBuf, 80, &ret.offset); - qDebug("stream scan log return null, offset %s", formatBuf); + qDebug("queue scan log return null, offset %s", formatBuf); return NULL; } else { ASSERT(0); @@ -1308,11 +1338,53 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { } qDebug("stream scan tsdb return null"); return NULL; - } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META) { - // TODO scan meta + } else { ASSERT(0); return NULL; } +} + +static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { + // NOTE: this operator does never check if current status is done or not + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SStreamScanInfo* pInfo = pOperator->info; + + qDebug("stream scan called"); +#if 0 + SStreamState* pState = pTaskInfo->streamInfo.pState; + if (pState) { + printf(">>>>>>>> stream write backend\n"); + SWinKey key = { + .ts = 1, + .groupId = 2, + }; + char tmp[100] = "abcdefg1"; + if (streamStatePut(pState, &key, &tmp, strlen(tmp) + 1) < 0) { + ASSERT(0); + } + + key.ts = 2; + char tmp2[100] = "abcdefg2"; + if (streamStatePut(pState, &key, &tmp2, strlen(tmp2) + 1) < 0) { + ASSERT(0); + } + + key.groupId = 5; + key.ts = 1; + char tmp3[100] = "abcdefg3"; + if (streamStatePut(pState, &key, &tmp3, strlen(tmp3) + 1) < 0) { + ASSERT(0); + } + + char* val2 = NULL; + int32_t sz; + if (streamStateGet(pState, &key, (void**)&val2, &sz) < 0) { + ASSERT(0); + } + printf("stream read %s %d\n", val2, sz); + streamFreeVal(val2); + } +#endif if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE) { STableScanInfo* pTSInfo = pInfo->pTableScanOp->info; @@ -1390,8 +1462,8 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { SSDataBlock* pSDB = doRangeScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex); if (pSDB) { STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info; - uint64_t version = getReaderMaxVersion(pTableScanInfo->dataReader); - updateInfoSetScanRange(pInfo->pUpdateInfo, &pTableScanInfo->cond.twindows, pInfo->groupId,version); + uint64_t version = getReaderMaxVersion(pTableScanInfo->dataReader); + updateInfoSetScanRange(pInfo->pUpdateInfo, &pTableScanInfo->cond.twindows, pInfo->groupId, version); pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA; checkUpdateData(pInfo, true, pSDB, false); return pSDB; @@ -1445,7 +1517,8 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { setBlockIntoRes(pInfo, &block); - if (updateInfoIgnore(pInfo->pUpdateInfo, &pInfo->pRes->info.window, pInfo->pRes->info.groupId, pInfo->pRes->info.version)) { + if (updateInfoIgnore(pInfo->pUpdateInfo, &pInfo->pRes->info.window, pInfo->pRes->info.groupId, + pInfo->pRes->info.version)) { printDataBlock(pInfo->pRes, "stream scan ignore"); blockDataCleanup(pInfo->pRes); continue; @@ -1495,11 +1568,6 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { } } -static SSDataBlock* doRawScan(SOperatorInfo* pInfo) { - // - return NULL; -} - static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) { SArray* tableIdList = taosArrayInit(4, sizeof(uint64_t)); @@ -1512,24 +1580,162 @@ static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) { return tableIdList; } +static SSDataBlock* doRawScan(SOperatorInfo* pOperator) { + // NOTE: this operator does never check if current status is done or not + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SStreamRawScanInfo* pInfo = pOperator->info; + pTaskInfo->streamInfo.metaRsp.metaRspLen = 0; // use metaRspLen !=0 to judge if data is meta + pTaskInfo->streamInfo.metaRsp.metaRsp = NULL; + + qDebug("tmqsnap doRawScan called"); + if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) { + SSDataBlock* pBlock = &pInfo->pRes; + + if (pInfo->dataReader && tsdbNextDataBlock(pInfo->dataReader)) { + if (isTaskKilled(pTaskInfo)) { + longjmp(pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED); + } + + tsdbRetrieveDataBlockInfo(pInfo->dataReader, &pBlock->info); + + SArray* pCols = tsdbRetrieveDataBlock(pInfo->dataReader, NULL); + pBlock->pDataBlock = pCols; + if (pCols == NULL) { + longjmp(pTaskInfo->env, terrno); + } + + qDebug("tmqsnap doRawScan get data uid:%ld", pBlock->info.uid); + pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_DATA; + pTaskInfo->streamInfo.lastStatus.uid = pBlock->info.uid; + pTaskInfo->streamInfo.lastStatus.ts = pBlock->info.window.ekey; + return pBlock; + } + + SMetaTableInfo mtInfo = getUidfromSnapShot(pInfo->sContext); + if (mtInfo.uid == 0) { // read snapshot done, change to get data from wal + qDebug("tmqsnap read snapshot done, change to get data from wal"); + pTaskInfo->streamInfo.prepareStatus.uid = mtInfo.uid; + pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG; + pTaskInfo->streamInfo.lastStatus.version = pInfo->sContext->snapVersion; + } else { + pTaskInfo->streamInfo.prepareStatus.uid = mtInfo.uid; + pTaskInfo->streamInfo.prepareStatus.ts = INT64_MIN; + qDebug("tmqsnap change get data uid:%ld", mtInfo.uid); + qStreamPrepareScan(pTaskInfo, &pTaskInfo->streamInfo.prepareStatus, pInfo->sContext->subType); + } + qDebug("tmqsnap stream scan tsdb return null"); + return NULL; + } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META) { + SSnapContext* sContext = pInfo->sContext; + void* data = NULL; + int32_t dataLen = 0; + int16_t type = 0; + int64_t uid = 0; + if (getMetafromSnapShot(sContext, &data, &dataLen, &type, &uid) < 0) { + qError("tmqsnap getMetafromSnapShot error"); + taosMemoryFreeClear(data); + return NULL; + } + + if (!sContext->queryMetaOrData) { // change to get data next poll request + pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_META; + pTaskInfo->streamInfo.lastStatus.uid = uid; + pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__SNAPSHOT_DATA; + pTaskInfo->streamInfo.metaRsp.rspOffset.uid = 0; + pTaskInfo->streamInfo.metaRsp.rspOffset.ts = INT64_MIN; + } else { + pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_META; + pTaskInfo->streamInfo.lastStatus.uid = uid; + pTaskInfo->streamInfo.metaRsp.rspOffset = pTaskInfo->streamInfo.lastStatus; + pTaskInfo->streamInfo.metaRsp.resMsgType = type; + pTaskInfo->streamInfo.metaRsp.metaRspLen = dataLen; + pTaskInfo->streamInfo.metaRsp.metaRsp = data; + } + + return NULL; + } + // else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) { + // int64_t fetchVer = pTaskInfo->streamInfo.prepareStatus.version + 1; + // + // while(1){ + // if (tqFetchLog(pInfo->tqReader->pWalReader, pInfo->sContext->withMeta, &fetchVer, &pInfo->pCkHead) < 0) { + // qDebug("tmqsnap tmq poll: consumer log end. offset %" PRId64, fetchVer); + // pTaskInfo->streamInfo.lastStatus.version = fetchVer; + // pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG; + // return NULL; + // } + // SWalCont* pHead = &pInfo->pCkHead->head; + // qDebug("tmqsnap tmq poll: consumer log offset %" PRId64 " msgType %d", fetchVer, pHead->msgType); + // + // if (pHead->msgType == TDMT_VND_SUBMIT) { + // SSubmitReq* pCont = (SSubmitReq*)&pHead->body; + // tqReaderSetDataMsg(pInfo->tqReader, pCont, 0); + // SSDataBlock* block = tqLogScanExec(pInfo->sContext->subType, pInfo->tqReader, pInfo->pFilterOutTbUid, + // &pInfo->pRes); if(block){ + // pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG; + // pTaskInfo->streamInfo.lastStatus.version = fetchVer; + // qDebug("tmqsnap fetch data msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType); + // return block; + // }else{ + // fetchVer++; + // } + // } else{ + // ASSERT(pInfo->sContext->withMeta); + // ASSERT(IS_META_MSG(pHead->msgType)); + // qDebug("tmqsnap fetch meta msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType); + // pTaskInfo->streamInfo.metaRsp.rspOffset.version = fetchVer; + // pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__LOG; + // pTaskInfo->streamInfo.metaRsp.resMsgType = pHead->msgType; + // pTaskInfo->streamInfo.metaRsp.metaRspLen = pHead->bodyLen; + // pTaskInfo->streamInfo.metaRsp.metaRsp = taosMemoryMalloc(pHead->bodyLen); + // memcpy(pTaskInfo->streamInfo.metaRsp.metaRsp, pHead->body, pHead->bodyLen); + // return NULL; + // } + // } + return NULL; +} + +static void destroyRawScanOperatorInfo(void* param) { + SStreamRawScanInfo* pRawScan = (SStreamRawScanInfo*)param; + tsdbReaderClose(pRawScan->dataReader); + destroySnapContext(pRawScan->sContext); + taosMemoryFree(pRawScan); +} + // for subscribing db or stb (not including column), // if this scan is used, meta data can be return // and schemas are decided when scanning -SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode, - SExecTaskInfo* pTaskInfo, STimeWindowAggSupp* pTwSup) { +SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pTaskInfo) { // create operator // create tb reader // create meta reader // create tq reader - return NULL; + SStreamRawScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamRawScanInfo)); + SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + if (pInfo == NULL || pOperator == NULL) { + terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; + return NULL; + } + + pInfo->vnode = pHandle->vnode; + + pInfo->sContext = pHandle->sContext; + pOperator->name = "RawStreamScanOperator"; + // pOperator->blocking = false; + // pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; + pOperator->pTaskInfo = pTaskInfo; + + pOperator->fpSet = createOperatorFpSet(NULL, doRawScan, NULL, NULL, destroyRawScanOperatorInfo, NULL, NULL, NULL); + return pOperator; } -static void destroyStreamScanOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyStreamScanOperatorInfo(void* param) { SStreamScanInfo* pStreamScan = (SStreamScanInfo*)param; if (pStreamScan->pTableScanOp && pStreamScan->pTableScanOp->info) { STableScanInfo* pTableScanInfo = pStreamScan->pTableScanOp->info; - destroyTableScanOperatorInfo(pTableScanInfo, numOfOutput); + destroyTableScanOperatorInfo(pTableScanInfo); taosMemoryFreeClear(pStreamScan->pTableScanOp); } if (pStreamScan->tqReader) { @@ -1540,7 +1746,7 @@ static void destroyStreamScanOperatorInfo(void* param, int32_t numOfOutput) { } if (pStreamScan->pPseudoExpr) { destroyExprInfo(pStreamScan->pPseudoExpr, pStreamScan->numOfPseudoExpr); - taosMemoryFreeClear(pStreamScan->pPseudoExpr); + taosMemoryFree(pStreamScan->pPseudoExpr); } updateInfoDestroy(pStreamScan->pUpdateInfo); @@ -1631,6 +1837,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys pInfo->readHandle = *pHandle; pInfo->tableUid = pScanPhyNode->uid; + pTaskInfo->streamInfo.snapshotVer = pHandle->version; // set the extract column id to streamHandle tqReaderSetColIdList(pInfo->tqReader, pColIds); @@ -1642,6 +1849,8 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys } taosArrayDestroy(tableIdList); memcpy(&pTaskInfo->streamInfo.tableCond, &pTSInfo->cond, sizeof(SQueryTableDataCond)); + } else { + taosArrayDestroy(pColIds); } // create the pseduo columns info @@ -1672,8 +1881,9 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock); pOperator->pTaskInfo = pTaskInfo; - pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamScan, NULL, NULL, destroyStreamScanOperatorInfo, - NULL, NULL, NULL); + __optr_fn_t nextFn = pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM ? doStreamScan : doQueueScan; + pOperator->fpSet = + createOperatorFpSet(operatorDummyOpenFn, nextFn, NULL, NULL, destroyStreamScanOperatorInfo, NULL, NULL, NULL); return pOperator; @@ -1683,7 +1893,7 @@ _error: return NULL; } -static void destroySysScanOperator(void* param, int32_t numOfOutput) { +static void destroySysScanOperator(void* param) { SSysTableScanInfo* pInfo = (SSysTableScanInfo*)param; tsem_destroy(&pInfo->ready); blockDataDestroy(pInfo->pRes); @@ -1941,7 +2151,7 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) { metaReaderClear(&smr); metaCloseTbCursor(pInfo->pCur); pInfo->pCur = NULL; - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } char stableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; @@ -2031,10 +2241,34 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) { metaReaderClear(&smr); if (numOfRows >= pOperator->resultInfo.capacity) { - break; + p->info.rows = numOfRows; + pInfo->pRes->info.rows = numOfRows; + + relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false); + doFilterResult(pInfo); + + blockDataCleanup(p); + numOfRows = 0; + + if (pInfo->pRes->info.rows > 0) { + break; + } } } + if (numOfRows > 0) { + p->info.rows = numOfRows; + pInfo->pRes->info.rows = numOfRows; + + relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false); + doFilterResult(pInfo); + + blockDataCleanup(p); + numOfRows = 0; + } + + blockDataDestroy(p); + // todo temporarily free the cursor here, the true reason why the free is not valid needs to be found if (ret != 0) { metaCloseTbCursor(pInfo->pCur); @@ -2042,14 +2276,6 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) { doSetOperatorCompleted(pOperator); } - p->info.rows = numOfRows; - pInfo->pRes->info.rows = numOfRows; - - relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false); - doFilterResult(pInfo); - - blockDataDestroy(p); - pInfo->loadInfo.totalRows += pInfo->pRes->info.rows; return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes; } @@ -2128,7 +2354,7 @@ static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) { metaReaderClear(&mr); metaCloseTbCursor(pInfo->pCur); pInfo->pCur = NULL; - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } // number of columns @@ -2206,10 +2432,34 @@ static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) { colDataAppend(pColInfoData, numOfRows, n, false); if (++numOfRows >= pOperator->resultInfo.capacity) { - break; + p->info.rows = numOfRows; + pInfo->pRes->info.rows = numOfRows; + + relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false); + doFilterResult(pInfo); + + blockDataCleanup(p); + numOfRows = 0; + + if (pInfo->pRes->info.rows > 0) { + break; + } } } + if (numOfRows > 0) { + p->info.rows = numOfRows; + pInfo->pRes->info.rows = numOfRows; + + relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false); + doFilterResult(pInfo); + + blockDataCleanup(p); + numOfRows = 0; + } + + blockDataDestroy(p); + // todo temporarily free the cursor here, the true reason why the free is not valid needs to be found if (ret != 0) { metaCloseTbCursor(pInfo->pCur); @@ -2217,14 +2467,6 @@ static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) { doSetOperatorCompleted(pOperator); } - p->info.rows = numOfRows; - pInfo->pRes->info.rows = numOfRows; - - relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false); - doFilterResult(pInfo); - - blockDataDestroy(p); - pInfo->loadInfo.totalRows += pInfo->pRes->info.rows; return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes; } @@ -2248,7 +2490,7 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { // build message and send to mnode to fetch the content of system tables. SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SSysTableScanInfo* pInfo = pOperator->info; - char dbName[TSDB_DB_NAME_LEN] = {0}; + char dbName[TSDB_DB_NAME_LEN] = {0}; const char* name = tNameGetTableName(&pInfo->name); if (pInfo->showRewrite) { @@ -2260,8 +2502,8 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { return sysTableScanUserTables(pOperator); } else if (strncasecmp(name, TSDB_INS_TABLE_TAGS, TSDB_TABLE_FNAME_LEN) == 0) { return sysTableScanUserTags(pOperator); - } else if (strncasecmp(name, TSDB_INS_TABLE_STABLES, TSDB_TABLE_FNAME_LEN) == 0 && - pInfo->showRewrite && IS_SYS_DBNAME(dbName)) { + } else if (strncasecmp(name, TSDB_INS_TABLE_STABLES, TSDB_TABLE_FNAME_LEN) == 0 && pInfo->showRewrite && + IS_SYS_DBNAME(dbName)) { return sysTableScanUserSTables(pOperator); } else { // load the meta from mnode of the given epset if (pOperator->status == OP_EXEC_DONE) { @@ -2344,10 +2586,10 @@ int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity) { const SSysTableMeta* pSysDbTableMeta = NULL; getInfosDbMeta(&pSysDbTableMeta, &size); - p->info.rows = buildDbTableInfoBlock(p, pSysDbTableMeta, size, TSDB_INFORMATION_SCHEMA_DB); + p->info.rows = buildDbTableInfoBlock(pInfo->sysInfo, p, pSysDbTableMeta, size, TSDB_INFORMATION_SCHEMA_DB); getPerfDbMeta(&pSysDbTableMeta, &size); - p->info.rows = buildDbTableInfoBlock(p, pSysDbTableMeta, size, TSDB_PERFORMANCE_SCHEMA_DB); + p->info.rows = buildDbTableInfoBlock(pInfo->sysInfo, p, pSysDbTableMeta, size, TSDB_PERFORMANCE_SCHEMA_DB); pInfo->pRes->info.rows = p->info.rows; relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false); @@ -2356,13 +2598,16 @@ int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity) { return pInfo->pRes->info.rows; } -int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, +int32_t buildDbTableInfoBlock(bool sysInfo, const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, const char* dbName) { char n[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; int32_t numOfRows = p->info.rows; for (int32_t i = 0; i < size; ++i) { const SSysTableMeta* pm = &pSysDbTableMeta[i]; + if (!sysInfo && pm->sysInfo) { + continue; + } SColumnInfoData* pColInfoData = taosArrayGet(p->pDataBlock, 0); @@ -2416,6 +2661,7 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan pInfo->accountId = pScanPhyNode->accountId; pInfo->pUser = taosMemoryStrDup((void*)pUser); + pInfo->sysInfo = pScanPhyNode->sysInfo; pInfo->showRewrite = pScanPhyNode->showRewrite; pInfo->pRes = pResBlock; pInfo->pCondition = pScanNode->node.pConditions; @@ -2486,7 +2732,7 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", item->uid, tstrerror(terrno), GET_TASKID(pTaskInfo)); metaReaderClear(&mr); - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { @@ -2499,7 +2745,7 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { } else { // it is a tag value STagVal val = {0}; val.cid = pExprInfo[j].base.pParam[0].pCol->colId; - const char* p = metaGetTableTagVal(&mr.me, pDst->info.type, &val); + const char* p = metaGetTableTagVal(mr.me.ctbEntry.pTags, pDst->info.type, &val); char* data = NULL; if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL) { @@ -2536,12 +2782,10 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { return (pRes->info.rows == 0) ? NULL : pInfo->pRes; } -static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyTagScanOperatorInfo(void* param) { STagScanInfo* pInfo = (STagScanInfo*)param; pInfo->pRes = blockDataDestroy(pInfo->pRes); - taosArrayDestroy(pInfo->pColMatchInfo); - taosMemoryFreeClear(param); } @@ -2597,16 +2841,16 @@ _error: int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle, STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, const char* idStr) { - int64_t st = taosGetTimestampUs(); int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo); if (code != TSDB_CODE_SUCCESS) { + qError("failed to getTableList, code: %s", tstrerror(code)); return code; } int64_t st1 = taosGetTimestampUs(); - qDebug("generate queried table list completed, elapsed time:%.2f ms %s", (st1-st)/1000.0, idStr); + qDebug("generate queried table list completed, elapsed time:%.2f ms %s", (st1 - st) / 1000.0, idStr); if (taosArrayGetSize(pTableListInfo->pTableList) == 0) { qDebug("no table qualified for query, %s" PRIx64, idStr); @@ -2620,7 +2864,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags } int64_t st2 = taosGetTimestampUs(); - qDebug("generate group id map completed, elapsed time:%.2f ms %s", (st2-st1)/1000.0, idStr); + qDebug("generate group id map completed, elapsed time:%.2f ms %s", (st2 - st1) / 1000.0, idStr); return TSDB_CODE_SUCCESS; } @@ -2737,7 +2981,7 @@ static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeSc int32_t code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pseudoSup.pExprInfo, pTableScanInfo->pseudoSup.numOfExprs, pBlock, GET_TASKID(pTaskInfo)); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } @@ -2780,7 +3024,7 @@ static SSDataBlock* getTableDataBlock(void* param) { STsdbReader* reader = taosArrayGetP(pTableScanInfo->dataReaders, readerIdx); while (tsdbNextDataBlock(reader)) { if (isTaskKilled(pOperator->pTaskInfo)) { - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED); + T_LONG_JMP(pOperator->pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED); } // process this data block based on the probabilities @@ -2803,7 +3047,7 @@ static SSDataBlock* getTableDataBlock(void* param) { int32_t code = loadDataBlockFromOneTable(pOperator, pTableScanInfo, readerIdx, pBlock, &status); // int32_t code = loadDataBlockOnDemand(pOperator->pRuntimeEnv, pTableScanInfo, pBlock, &status); if (code != TSDB_CODE_SUCCESS) { - longjmp(pOperator->pTaskInfo->env, code); + T_LONG_JMP(pOperator->pTaskInfo->env, code); } // current block is filter out according to filter condition, continue load the next block @@ -2896,7 +3140,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { int32_t code = tsortOpen(pInfo->pSortHandle); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } return TSDB_CODE_SUCCESS; @@ -2966,7 +3210,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) { int32_t code = pOperator->fpSet._openFn(pOperator); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } size_t tableListSize = taosArrayGetSize(pInfo->tableListInfo->pTableList); if (!pInfo->hasGroupId) { @@ -3004,7 +3248,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) { return pBlock; } -void destroyTableMergeScanOperatorInfo(void* param, int32_t numOfOutput) { +void destroyTableMergeScanOperatorInfo(void* param) { STableMergeScanInfo* pTableScanInfo = (STableMergeScanInfo*)param; cleanupQueryTableDataCond(&pTableScanInfo->cond); taosArrayDestroy(pTableScanInfo->sortSourceParams); diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 4dd5e4ec15e9521b6c2cdc39562313592242773c..e2014ec97320c863a6857e94c538bd8d8319c2a1 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -20,7 +20,7 @@ static SSDataBlock* doSort(SOperatorInfo* pOperator); static int32_t doOpenSortOperator(SOperatorInfo* pOperator); static int32_t getExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len); -static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput); +static void destroyOrderOperatorInfo(void* param); // todo add limit/offset impl SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortNode, SExecTaskInfo* pTaskInfo) { @@ -156,7 +156,7 @@ void applyScalarFunction(SSDataBlock* pBlock, void* param) { int32_t code = projectApplyFunctions(pOperator->exprSupp.pExprInfo, pBlock, pBlock, pOperator->exprSupp.pCtx, pOperator->exprSupp.numOfExprs, NULL); if (code != TSDB_CODE_SUCCESS) { - longjmp(pOperator->pTaskInfo->env, code); + T_LONG_JMP(pOperator->pTaskInfo->env, code); } } } @@ -184,7 +184,7 @@ int32_t doOpenSortOperator(SOperatorInfo* pOperator) { taosMemoryFreeClear(ps); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } pOperator->cost.openCost = (taosGetTimestampUs() - pInfo->startTs) / 1000.0; @@ -204,7 +204,7 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) { int32_t code = pOperator->fpSet._openFn(pOperator); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } SSDataBlock* pBlock = NULL; @@ -250,7 +250,7 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) { return blockDataGetNumOfRows(pBlock) > 0 ? pBlock : NULL; } -void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) { +void destroyOrderOperatorInfo(void* param) { SSortOperatorInfo* pInfo = (SSortOperatorInfo*)param; pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes); @@ -388,7 +388,7 @@ int32_t beginSortGroup(SOperatorInfo* pOperator) { taosMemoryFreeClear(ps); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } return TSDB_CODE_SUCCESS; @@ -420,7 +420,7 @@ SSDataBlock* doGroupSort(SOperatorInfo* pOperator) { int32_t code = pOperator->fpSet._openFn(pOperator); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } if (!pInfo->hasGroupId) { @@ -468,7 +468,7 @@ int32_t getGroupSortExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, u return TSDB_CODE_SUCCESS; } -void destroyGroupSortOperatorInfo(void* param, int32_t numOfOutput) { +void destroyGroupSortOperatorInfo(void* param) { SGroupSortOperatorInfo* pInfo = (SGroupSortOperatorInfo*)param; pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes); @@ -575,7 +575,7 @@ int32_t doOpenMultiwayMergeOperator(SOperatorInfo* pOperator) { int32_t code = tsortOpen(pInfo->pSortHandle); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, terrno); + T_LONG_JMP(pTaskInfo->env, terrno); } pOperator->cost.openCost = (taosGetTimestampUs() - pInfo->startTs) / 1000.0; @@ -672,7 +672,7 @@ SSDataBlock* doMultiwayMerge(SOperatorInfo* pOperator) { int32_t code = pOperator->fpSet._openFn(pOperator); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } SSDataBlock* pBlock = getMultiwaySortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, @@ -685,7 +685,7 @@ SSDataBlock* doMultiwayMerge(SOperatorInfo* pOperator) { return pBlock; } -void destroyMultiwayMergeOperatorInfo(void* param, int32_t numOfOutput) { +void destroyMultiwayMergeOperatorInfo(void* param) { SMultiwayMergeOperatorInfo* pInfo = (SMultiwayMergeOperatorInfo*)param; pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes); pInfo->pInputBlock = blockDataDestroy(pInfo->pInputBlock); diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index 6d7cd727b9cd1edca5d76964ed97133f1a204a9d..f23552c5a7b82207ffc368dbae7c1894cb6a8edd 100644 --- a/source/libs/executor/src/tfill.c +++ b/source/libs/executor/src/tfill.c @@ -36,6 +36,7 @@ #define GET_DEST_SLOT_ID(_p) ((_p)->pExpr->base.resSchema.slotId) static void doSetVal(SColumnInfoData* pDstColInfoData, int32_t rowIndex, const SGroupKeys* pKey); +static bool fillIfWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, SColumnInfoData* pDstColInfoData, int32_t rowIndex); static void setNullRow(SSDataBlock* pBlock, SFillInfo* pFillInfo, int32_t rowIndex) { for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) { @@ -43,9 +44,8 @@ static void setNullRow(SSDataBlock* pBlock, SFillInfo* pFillInfo, int32_t rowInd int32_t dstSlotId = GET_DEST_SLOT_ID(pCol); SColumnInfoData* pDstColInfo = taosArrayGet(pBlock->pDataBlock, dstSlotId); if (pCol->notFillCol) { - if (pDstColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) { - colDataAppend(pDstColInfo, rowIndex, (const char*)&pFillInfo->currentKey, false); - } else { + bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfo, rowIndex); + if (!filled) { SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal; SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDstColInfo, rowIndex, pKey); @@ -76,6 +76,35 @@ static void doSetUserSpecifiedValue(SColumnInfoData* pDst, SVariant* pVar, int32 } } +//fill windows pseudo column, _wstart, _wend, _wduration and return true, otherwise return false +static bool fillIfWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, SColumnInfoData* pDstColInfoData, int32_t rowIndex) { + if (!pCol->notFillCol) { + return false; + } + if (pCol->pExpr->pExpr->nodeType == QUERY_NODE_COLUMN) { + if (pCol->pExpr->base.numOfParams != 1) { + return false; + } + if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_START) { + colDataAppend(pDstColInfoData, rowIndex, (const char*)&pFillInfo->currentKey, false); + return true; + } else if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_END) { + //TODO: include endpoint + SInterval* pInterval = &pFillInfo->interval; + int32_t step = (pFillInfo->order == TSDB_ORDER_ASC) ? 1 : -1; + int64_t windowEnd = + taosTimeAdd(pFillInfo->currentKey, pInterval->sliding * step, pInterval->slidingUnit, pInterval->precision); + colDataAppend(pDstColInfoData, rowIndex, (const char*)&windowEnd, false); + return true; + } else if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_DURATION) { + //TODO: include endpoint + colDataAppend(pDstColInfoData, rowIndex, (const char*)&pFillInfo->interval.sliding, false); + return true; + } + } + return false; +} + static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* pSrcBlock, int64_t ts, bool outOfBound) { SPoint point1, point2, point; @@ -92,10 +121,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* SFillColInfo* pCol = &pFillInfo->pFillCol[i]; SColumnInfoData* pDstColInfoData = taosArrayGet(pBlock->pDataBlock, GET_DEST_SLOT_ID(pCol)); - - if (pDstColInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) { - colDataAppend(pDstColInfoData, index, (const char*)&pFillInfo->currentKey, false); - } else { + bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfoData, index); + if (!filled) { SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDstColInfoData, index, pKey); } @@ -106,10 +133,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; SColumnInfoData* pDstColInfoData = taosArrayGet(pBlock->pDataBlock, GET_DEST_SLOT_ID(pCol)); - - if (pDstColInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) { - colDataAppend(pDstColInfoData, index, (const char*)&pFillInfo->currentKey, false); - } else { + bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfoData, index); + if (!filled) { SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDstColInfoData, index, pKey); } @@ -127,9 +152,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* int16_t type = pDstCol->info.type; if (pCol->notFillCol) { - if (type == TSDB_DATA_TYPE_TIMESTAMP) { - colDataAppend(pDstCol, index, (const char*)&pFillInfo->currentKey, false); - } else { + bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstCol, index); + if (!filled) { SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal; SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDstCol, index, pKey); @@ -170,9 +194,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, slotId); if (pCol->notFillCol) { - if (pDst->info.type == TSDB_DATA_TYPE_TIMESTAMP) { - colDataAppend(pDst, index, (const char*)&pFillInfo->currentKey, false); - } else { + bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDst, index); + if (!filled) { SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal; SGroupKeys* pKey = taosArrayGet(p, i); doSetVal(pDst, index, pKey); @@ -540,7 +563,7 @@ int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t ma int64_t numOfRes = -1; if (numOfRows > 0) { // still fill gap within current data block, not generating data after the result set. - TSKEY lastKey = (TSDB_ORDER_ASC == pFillInfo->order ? tsList[pFillInfo->numOfRows - 1] : tsList[0]); + TSKEY lastKey = tsList[pFillInfo->numOfRows - 1]; numOfRes = taosTimeCountInterval(lastKey, pFillInfo->currentKey, pFillInfo->interval.sliding, pFillInfo->interval.slidingUnit, pFillInfo->interval.precision); numOfRes += 1; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 6418f5305c97f95b7d62cd9f03ca22c8ded37701..152bd5939dbe8b5bed819f19f26b9c14d9cc4475 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -15,6 +15,7 @@ #include "executorimpl.h" #include "function.h" #include "functionMgt.h" +#include "tcommon.h" #include "tcompare.h" #include "tdatablock.h" #include "tfill.h" @@ -27,21 +28,21 @@ typedef enum SResultTsInterpType { #define IS_FINAL_OP(op) ((op)->isFinal) -typedef struct SWinRes { - TSKEY ts; - uint64_t groupId; -} SWinRes; - typedef struct SPullWindowInfo { STimeWindow window; uint64_t groupId; } SPullWindowInfo; +typedef struct SOpenWindowInfo { + SResultRowPosition pos; + uint64_t groupId; +} SOpenWindowInfo; + static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator); static int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo); -static SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult); +static SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult, uint64_t groupId); static void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInfo* pInfo, SResultRow* pResult); ///* @@ -602,14 +603,14 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num int32_t startPos = 0; int32_t numOfOutput = pSup->numOfExprs; - uint64_t groupId = pBlock->info.groupId; SResultRow* pResult = NULL; while (1) { SListNode* pn = tdListGetHead(pResultRowInfo->openWindow); - - SResultRowPosition* p1 = (SResultRowPosition*)pn->data; + SOpenWindowInfo* pOpenWin = (SOpenWindowInfo *)pn->data; + uint64_t groupId = pOpenWin->groupId; + SResultRowPosition* p1 = &pOpenWin->pos; if (p->pageId == p1->pageId && p->offset == p1->offset) { break; } @@ -628,21 +629,24 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &w, (scanFlag == MAIN_SCAN), &pResult, groupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } ASSERT(!isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)); SGroupKeys* pTsKey = taosArrayGet(pInfo->pPrevValues, 0); int64_t prevTs = *(int64_t*)pTsKey->pData; - doTimeWindowInterpolation(pInfo->pPrevValues, pBlock->pDataBlock, prevTs, -1, tsCols[startPos], startPos, w.ekey, - RESULT_ROW_END_INTERP, pSup); + if (groupId == pBlock->info.groupId) { + doTimeWindowInterpolation(pInfo->pPrevValues, pBlock->pDataBlock, prevTs, -1, tsCols[startPos], startPos, w.ekey, + RESULT_ROW_END_INTERP, pSup); + } setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); setNotInterpoWindowKey(pSup->pCtx, numOfExprs, RESULT_ROW_START_INTERP); - doApplyFunctions(pTaskInfo, pSup->pCtx, &w, &pInfo->twAggSup.timeWindowData, startPos, 0, tsCols, pBlock->info.rows, - numOfExprs, pInfo->inputOrder); + updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &w, true); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, 0, pBlock->info.rows, + numOfExprs); if (isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)) { closeResultRow(pr); @@ -813,7 +817,7 @@ static int32_t savePullWindow(SPullWindowInfo* pPullInfo, SArray* pPullWins) { int32_t compareResKey(void* pKey, void* data, int32_t index) { SArray* res = (SArray*)data; SResKeyPos* pos = taosArrayGetP(res, index); - SWinRes* pData = (SWinRes*)pKey; + SWinKey* pData = (SWinKey*)pKey; if (pData->ts == *(int64_t*)pos->key) { if (pData->groupId > pos->groupId) { return 1; @@ -829,7 +833,7 @@ int32_t compareResKey(void* pKey, void* data, int32_t index) { static int32_t saveResult(int64_t ts, int32_t pageId, int32_t offset, uint64_t groupId, SArray* pUpdated) { int32_t size = taosArrayGetSize(pUpdated); - SWinRes data = {.ts = ts, .groupId = groupId}; + SWinKey data = {.ts = ts, .groupId = groupId}; int32_t index = binarySearchCom(pUpdated, size, &data, TSDB_ORDER_DESC, compareResKey); if (index == -1) { index = 0; @@ -862,8 +866,8 @@ static int32_t saveWinResult(int64_t ts, int32_t pageId, int32_t offset, uint64_ newPos->groupId = groupId; newPos->pos = (SResultRowPosition){.pageId = pageId, .offset = offset}; *(int64_t*)newPos->key = ts; - SWinRes key = {.ts = ts, .groupId = groupId}; - if (taosHashPut(pUpdatedMap, &key, sizeof(SWinRes), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) { + SWinKey key = {.ts = ts, .groupId = groupId}; + if (taosHashPut(pUpdatedMap, &key, sizeof(SWinKey), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) { taosMemoryFree(newPos); } return TSDB_CODE_SUCCESS; @@ -871,7 +875,6 @@ static int32_t saveWinResult(int64_t ts, int32_t pageId, int32_t offset, uint64_ static int32_t saveWinResultRow(SResultRow* result, uint64_t groupId, SHashObj* pUpdatedMap) { return saveWinResult(result->win.skey, result->pageId, result->offset, groupId, pUpdatedMap); - ; } static int32_t saveResultRow(SResultRow* result, uint64_t groupId, SArray* pUpdated) { @@ -881,20 +884,20 @@ static int32_t saveResultRow(SResultRow* result, uint64_t groupId, SArray* pUpda static void removeResults(SArray* pWins, SHashObj* pUpdatedMap) { int32_t size = taosArrayGetSize(pWins); for (int32_t i = 0; i < size; i++) { - SWinRes* pW = taosArrayGet(pWins, i); - taosHashRemove(pUpdatedMap, pW, sizeof(SWinRes)); + SWinKey* pW = taosArrayGet(pWins, i); + taosHashRemove(pUpdatedMap, pW, sizeof(SWinKey)); } } int64_t getWinReskey(void* data, int32_t index) { SArray* res = (SArray*)data; - SWinRes* pos = taosArrayGet(res, index); + SWinKey* pos = taosArrayGet(res, index); return pos->ts; } int32_t compareWinRes(void* pKey, void* data, int32_t index) { SArray* res = (SArray*)data; - SWinRes* pos = taosArrayGetP(res, index); + SWinKey* pos = taosArrayGetP(res, index); SResKeyPos* pData = (SResKeyPos*)pKey; if (*(int64_t*)pData->key == pos->ts) { if (pData->groupId > pos->groupId) { @@ -910,11 +913,11 @@ int32_t compareWinRes(void* pKey, void* data, int32_t index) { } static void removeDeleteResults(SHashObj* pUpdatedMap, SArray* pDelWins) { - if (!pUpdatedMap || taosHashGetSize(pUpdatedMap) == 0) { + int32_t delSize = taosArrayGetSize(pDelWins); + if (taosHashGetSize(pUpdatedMap) == 0 || delSize == 0) { return; } - int32_t delSize = taosArrayGetSize(pDelWins); - void* pIte = NULL; + void* pIte = NULL; while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) { SResKeyPos* pResKey = (SResKeyPos*)pIte; int32_t index = binarySearchCom(pDelWins, delSize, pResKey, TSDB_ORDER_DESC, compareWinRes); @@ -954,7 +957,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) { @@ -970,14 +973,14 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul // prev time window not interpolation yet. if (pInfo->timeWindowInterpo) { - SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult); + SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult, tableGroupId); doInterpUnclosedTimeWindow(pOperatorInfo, numOfOutput, pResultRowInfo, pBlock, scanFlag, tsCols, &pos); // restore current time window ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } // window start key interpolation @@ -987,8 +990,8 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) && inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) { updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true); - doApplyFunctions(pTaskInfo, pSup->pCtx, &win, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols, - pBlock->info.rows, numOfOutput, pInfo->inputOrder); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows, + numOfOutput); } doCloseWindow(pResultRowInfo, pInfo, pResult); @@ -1011,7 +1014,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul int32_t code = setTimeWindowOutputBuf(pResultRowInfo, &nextWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) { @@ -1022,13 +1025,21 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul ekey = ascScan ? nextWin.ekey : nextWin.skey; forwardRows = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->inputOrder); - // window start(end) key interpolation doWindowBorderInterpolation(pInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pSup); - + //TODO: add to open window? how to close the open windows after input blocks exhausted? +#if 0 + if ((ascScan && ekey <= pBlock->info.window.ekey) || + (!ascScan && ekey >= pBlock->info.window.skey)) { + // window start(end) key interpolation + doWindowBorderInterpolation(pInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pSup); + } else if (pInfo->timeWindowInterpo) { + addToOpenWindowList(pResultRowInfo, pResult, tableGroupId); + } +#endif updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true); - doApplyFunctions(pTaskInfo, pSup->pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols, - pBlock->info.rows, numOfOutput, pInfo->inputOrder); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows, + numOfOutput); doCloseWindow(pResultRowInfo, pInfo, pResult); } @@ -1045,20 +1056,23 @@ void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInf } } -SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult) { - SResultRowPosition pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; +SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult, uint64_t groupId) { + SOpenWindowInfo openWin = {0}; + openWin.pos.pageId = pResult->pageId; + openWin.pos.offset = pResult->offset; + openWin.groupId = groupId; SListNode* pn = tdListGetTail(pResultRowInfo->openWindow); if (pn == NULL) { - tdListAppend(pResultRowInfo->openWindow, &pos); - return pos; + tdListAppend(pResultRowInfo->openWindow, &openWin); + return openWin.pos; } - SResultRowPosition* px = (SResultRowPosition*)pn->data; - if (px->pageId != pos.pageId || px->offset != pos.offset) { - tdListAppend(pResultRowInfo->openWindow, &pos); + SOpenWindowInfo * px = (SOpenWindowInfo *)pn->data; + if (px->pos.pageId != openWin.pos.pageId || px->pos.offset != openWin.pos.offset || px->groupId != openWin.groupId) { + tdListAppend(pResultRowInfo->openWindow, &openWin); } - return pos; + return openWin.pos; } int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo) { @@ -1187,12 +1201,12 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI int32_t ret = setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &window, masterScan, &pResult, gid, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code - longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &window, false); - doApplyFunctions(pTaskInfo, pSup->pCtx, &window, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, - pRowSup->numOfRows, NULL, pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, + pRowSup->numOfRows, pBlock->info.rows, numOfOutput); // here we start a new session window doKeepNewWindowStartInfo(pRowSup, tsList, j, gid); @@ -1212,12 +1226,12 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI int32_t ret = setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &pRowSup->win, masterScan, &pResult, gid, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code - longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false); - doApplyFunctions(pTaskInfo, pSup->pCtx, &pRowSup->win, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, - pRowSup->numOfRows, NULL, pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, pRowSup->numOfRows, + pBlock->info.rows, numOfOutput); } static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) { @@ -1385,7 +1399,7 @@ bool doClearWindow(SAggSupporter* pAggSup, SExprSupp* pSup, char* pData, int16_t int32_t numOfOutput) { SET_RES_WINDOW_KEY(pAggSup->keyBuf, pData, bytes, groupId); SResultRowPosition* p1 = - (SResultRowPosition*)taosHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); + (SResultRowPosition*)tSimpleHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); if (!p1) { // window has been closed return false; @@ -1398,14 +1412,14 @@ bool doDeleteIntervalWindow(SAggSupporter* pAggSup, TSKEY ts, uint64_t groupId) size_t bytes = sizeof(TSKEY); SET_RES_WINDOW_KEY(pAggSup->keyBuf, &ts, bytes, groupId); SResultRowPosition* p1 = - (SResultRowPosition*)taosHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); + (SResultRowPosition*)tSimpleHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); if (!p1) { // window has been closed return false; } // SFilePage* bufPage = getBufPage(pAggSup->pResultBuf, p1->pageId); // dBufSetBufPageRecycled(pAggSup->pResultBuf, bufPage); - taosHashRemove(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); + tSimpleHashRemove(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); return true; } @@ -1420,7 +1434,7 @@ void doDeleteSpecifyIntervalWindow(SAggSupporter* pAggSup, SSDataBlock* pBlock, STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsStarts[i], pInterval, TSDB_ORDER_ASC); doDeleteIntervalWindow(pAggSup, win.skey, groupIds[i]); if (pUpWins) { - SWinRes winRes = {.ts = win.skey, .groupId = groupIds[i]}; + SWinKey winRes = {.ts = win.skey, .groupId = groupIds[i]}; taosArrayPush(pUpWins, &winRes); } } @@ -1447,7 +1461,7 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval* uint64_t winGpId = pGpDatas ? pGpDatas[startPos] : pBlock->info.groupId; bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TSKEY), winGpId, numOfOutput); if (pUpWins && res) { - SWinRes winRes = {.ts = win.skey, .groupId = winGpId}; + SWinKey winRes = {.ts = win.skey, .groupId = winGpId}; taosArrayPush(pUpWins, &winRes); } getNextTimeWindow(pInterval, pInterval->precision, TSDB_ORDER_ASC, &win); @@ -1455,11 +1469,13 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval* } } -static int32_t getAllIntervalWindow(SHashObj* pHashMap, SHashObj* resWins) { - void* pIte = NULL; - size_t keyLen = 0; - while ((pIte = taosHashIterate(pHashMap, pIte)) != NULL) { - void* key = taosHashGetKey(pIte, &keyLen); +static int32_t getAllIntervalWindow(SSHashObj* pHashMap, SHashObj* resWins) { + + void* pIte = NULL; + size_t keyLen = 0; + int32_t iter = 0; + while ((pIte = tSimpleHashIterate(pHashMap, pIte, &iter)) != NULL) { + void* key = tSimpleHashGetKey(pIte, &keyLen); uint64_t groupId = *(uint64_t*)key; ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))); TSKEY ts = *(int64_t*)((char*)key + sizeof(uint64_t)); @@ -1472,25 +1488,26 @@ static int32_t getAllIntervalWindow(SHashObj* pHashMap, SHashObj* resWins) { return TSDB_CODE_SUCCESS; } -static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, SInterval* pInterval, +static int32_t closeIntervalWindow(SSHashObj* pHashMap, STimeWindowAggSupp* pSup, SInterval* pInterval, SHashObj* pPullDataMap, SHashObj* closeWins, SArray* pRecyPages, SDiskbasedBuf* pDiscBuf) { qDebug("===stream===close interval window"); - void* pIte = NULL; - size_t keyLen = 0; - while ((pIte = taosHashIterate(pHashMap, pIte)) != NULL) { - void* key = taosHashGetKey(pIte, &keyLen); + void* pIte = NULL; + size_t keyLen = 0; + int32_t iter = 0; + while ((pIte = tSimpleHashIterate(pHashMap, pIte, &iter)) != NULL) { + void* key = tSimpleHashGetKey(pIte, &keyLen); uint64_t groupId = *(uint64_t*)key; ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))); TSKEY ts = *(int64_t*)((char*)key + sizeof(uint64_t)); STimeWindow win; win.skey = ts; win.ekey = taosTimeAdd(win.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1; - SWinRes winRe = { + SWinKey winRe = { .ts = win.skey, .groupId = groupId, }; - void* chIds = taosHashGet(pPullDataMap, &winRe, sizeof(SWinRes)); + void* chIds = taosHashGet(pPullDataMap, &winRe, sizeof(SWinKey)); if (isCloseWindow(&win, pSup)) { if (chIds && pPullDataMap) { SArray* chAy = *(SArray**)chIds; @@ -1517,7 +1534,7 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, } char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))]; SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId); - taosHashRemove(pHashMap, keyBuf, keyLen); + tSimpleHashIterateRemove(pHashMap, keyBuf, keyLen, &pIte, &iter); } } return TSDB_CODE_SUCCESS; @@ -1557,7 +1574,7 @@ static void doBuildDeleteResult(SArray* pWins, int32_t* index, SSDataBlock* pBlo SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX); SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX); for (int32_t i = *index; i < size; i++) { - SWinRes* pWin = taosArrayGet(pWins, i); + SWinKey* pWin = taosArrayGet(pWins, i); colDataAppend(pTsCol, pBlock->info.rows, (const char*)&pWin->ts, false); colDataAppend(pGroupCol, pBlock->info.rows, (const char*)&pWin->groupId, false); pBlock->info.rows++; @@ -1595,8 +1612,11 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SOperatorInfo* downstream = pOperator->pDownstream[0]; SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); // SResKeyPos - _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP); + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK); + + SStreamState* pState = pTaskInfo->streamInfo.pState; + while (1) { SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { @@ -1641,6 +1661,35 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdatedMap); } +#if 0 + if (pState) { + printf(">>>>>>>> stream read backend\n"); + SWinKey key = { + .ts = 1, + .groupId = 2, + }; + char* val = NULL; + int32_t sz; + if (streamStateGet(pState, &key, (void**)&val, &sz) < 0) { + ASSERT(0); + } + printf("stream read %s %d\n", val, sz); + streamFreeVal(val); + + SStreamStateCur* pCur = streamStateGetCur(pState, &key); + ASSERT(pCur); + while (streamStateCurNext(pState, pCur) == 0) { + SWinKey key1; + const void* val1; + if (streamStateGetKVByCur(pCur, &key1, &val1, &sz) < 0) { + break; + } + printf("stream iter key groupId:%d ts:%d, value %s %d\n", key1.groupId, key1.ts, val1, sz); + } + streamStateFreeCur(pCur); + } +#endif + pOperator->status = OP_RES_TO_RETURN; closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdatedMap, pInfo->pRecycledPages, pInfo->aggSup.pResultBuf); @@ -1666,7 +1715,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes; } -static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput) { +static void destroyStateWindowOperatorInfo(void* param) { SStateWindowOperatorInfo* pInfo = (SStateWindowOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); taosMemoryFreeClear(pInfo->stateKey.pData); @@ -1679,7 +1728,7 @@ static void freeItem(void* param) { taosMemoryFree(pKey->pData); } -void destroyIntervalOperatorInfo(void* param, int32_t numOfOutput) { +void destroyIntervalOperatorInfo(void* param) { SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); cleanupAggSup(&pInfo->aggSup); @@ -1696,7 +1745,7 @@ void destroyIntervalOperatorInfo(void* param, int32_t numOfOutput) { taosMemoryFreeClear(param); } -void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) { +void destroyStreamFinalIntervalOperatorInfo(void* param) { SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); cleanupAggSup(&pInfo->aggSup); @@ -1706,14 +1755,19 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) { blockDataDestroy(pInfo->pPullDataRes); taosArrayDestroy(pInfo->pRecycledPages); blockDataDestroy(pInfo->pUpdateRes); + taosArrayDestroy(pInfo->pDelWins); + blockDataDestroy(pInfo->pDelRes); if (pInfo->pChildren) { int32_t size = taosArrayGetSize(pInfo->pChildren); for (int32_t i = 0; i < size; i++) { SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, i); - destroyStreamFinalIntervalOperatorInfo(pChildOp->info, numOfOutput); + destroyStreamFinalIntervalOperatorInfo(pChildOp->info); + taosMemoryFree(pChildOp->pDownstream); + cleanupExprSupp(&pChildOp->exprSupp); taosMemoryFreeClear(pChildOp); } + taosArrayDestroy(pInfo->pChildren); } nodesDestroyNode((SNode*)pInfo->pPhyNode); colDataDestroy(&pInfo->twAggSup.timeWindowData); @@ -1774,13 +1828,7 @@ static bool timeWindowinterpNeeded(SqlFunctionCtx* pCtx, int32_t numOfCols, SInt return needed; } -void increaseTs(SqlFunctionCtx* pCtx) { - if (pCtx[0].pExpr->pExpr->_function.pFunctNode->funcType == FUNCTION_TYPE_WSTART) { - pCtx[0].increase = true; - } -} - -void initIntervalDownStream(SOperatorInfo* downstream, uint8_t type, SAggSupporter* pSup) { +void initIntervalDownStream(SOperatorInfo* downstream, uint16_t type, SAggSupporter* pSup) { if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { // Todo(liuyao) support partition by column return; @@ -1790,6 +1838,12 @@ void initIntervalDownStream(SOperatorInfo* downstream, uint8_t type, SAggSupport pScanInfo->sessionSup.pIntervalAggSup = pSup; } +void initStreamFunciton(SqlFunctionCtx* pCtx, int32_t numOfExpr) { + for (int32_t i = 0; i < numOfExpr; i++) { + pCtx[i].isStream = true; + } +} + SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, STimeWindowAggSupp* pTwAggSupp, SIntervalPhysiNode* pPhyNode, @@ -1827,11 +1881,15 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* initResultSizeInfo(&pOperator->resultInfo, 4096); int32_t code = initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + initBasicInfo(&pInfo->binfo, pResBlock); if (isStream) { ASSERT(numOfCols > 0); - increaseTs(pSup->pCtx); + initStreamFunciton(pSup->pCtx, pSup->numOfExprs); } initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pInfo->win); @@ -1841,13 +1899,14 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pInfo->timeWindowInterpo = timeWindowinterpNeeded(pSup->pCtx, numOfCols, pInfo); if (pInfo->timeWindowInterpo) { - pInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SResultRowPosition)); + pInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SOpenWindowInfo)); if (pInfo->binfo.resultRowInfo.openWindow == NULL) { goto _error; } } + pInfo->pRecycledPages = taosArrayInit(4, sizeof(int32_t)); - pInfo->pDelWins = taosArrayInit(4, sizeof(SWinRes)); + pInfo->pDelWins = taosArrayInit(4, sizeof(SWinKey)); pInfo->delIndex = 0; pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT); initResultRowInfo(&pInfo->binfo.resultRowInfo); @@ -1856,8 +1915,6 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL; pOperator->blocking = true; pOperator->status = OP_NOT_OPENED; - pOperator->exprSupp.pExprInfo = pExprInfo; - pOperator->exprSupp.numOfExprs = numOfCols; pOperator->info = pInfo; pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, doStreamIntervalAgg, NULL, @@ -1875,63 +1932,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* return pOperator; _error: - destroyIntervalOperatorInfo(pInfo, numOfCols); - taosMemoryFreeClear(pOperator); - pTaskInfo->code = code; - return NULL; -} - -SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, - SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) { - SIntervalAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SIntervalAggOperatorInfo)); - SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); - if (pInfo == NULL || pOperator == NULL) { - goto _error; - } - - pOperator->pTaskInfo = pTaskInfo; - pInfo->inputOrder = TSDB_ORDER_ASC; - pInfo->interval = *pInterval; - pInfo->execModel = OPTR_EXEC_MODEL_STREAM; - pInfo->win = pTaskInfo->window; - pInfo->twAggSup = *pTwAggSupp; - pInfo->primaryTsIndex = primaryTsSlotId; - - int32_t numOfRows = 4096; - size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; - - initResultSizeInfo(&pOperator->resultInfo, numOfRows); - int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str); - initBasicInfo(&pInfo->binfo, pResBlock); - initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pInfo->win); - - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - initResultRowInfo(&pInfo->binfo.resultRowInfo); - - pOperator->name = "StreamTimeIntervalAggOperator"; - pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL; - pOperator->blocking = true; - pOperator->status = OP_NOT_OPENED; - pOperator->exprSupp.pExprInfo = pExprInfo; - pOperator->exprSupp.numOfExprs = numOfCols; - pOperator->info = pInfo; - - pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doStreamIntervalAgg, doStreamIntervalAgg, NULL, - destroyIntervalOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL); - - code = appendDownstream(pOperator, &downstream, 1); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } - - return pOperator; - -_error: - destroyIntervalOperatorInfo(pInfo, numOfCols); + destroyIntervalOperatorInfo(pInfo); taosMemoryFreeClear(pOperator); pTaskInfo->code = code; return NULL; @@ -1964,8 +1965,8 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator if (gid != pRowSup->groupId || pInfo->winSup.prevTs == INT64_MIN) { doKeepNewWindowStartInfo(pRowSup, tsList, j, gid); doKeepTuple(pRowSup, tsList[j], gid); - } else if ((tsList[j] - pRowSup->prevTs >= 0) && tsList[j] - pRowSup->prevTs <= gap || - (pRowSup->prevTs - tsList[j] >= 0) && (pRowSup->prevTs - tsList[j] <= gap)) { + } else if (((tsList[j] - pRowSup->prevTs >= 0) && (tsList[j] - pRowSup->prevTs <= gap)) || + ((pRowSup->prevTs - tsList[j] >= 0) && (pRowSup->prevTs - tsList[j] <= gap))) { // The gap is less than the threshold, so it belongs to current session window that has been opened already. doKeepTuple(pRowSup, tsList[j], gid); if (j == 0 && pRowSup->startRowIndex != 0) { @@ -1981,13 +1982,13 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator int32_t ret = setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &window, masterScan, &pResult, gid, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code - longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); } // pInfo->numOfRows data belong to the current session window updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &window, false); - doApplyFunctions(pTaskInfo, pSup->pCtx, &window, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, - pRowSup->numOfRows, NULL, pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, + pRowSup->numOfRows, pBlock->info.rows, numOfOutput); // here we start a new session window doKeepNewWindowStartInfo(pRowSup, tsList, j, gid); @@ -2000,12 +2001,12 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator int32_t ret = setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &pRowSup->win, masterScan, &pResult, gid, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code - longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR); } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false); - doApplyFunctions(pTaskInfo, pSup->pCtx, &pRowSup->win, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, - pRowSup->numOfRows, NULL, pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, pRowSup->numOfRows, + pBlock->info.rows, numOfOutput); } static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) { @@ -2154,10 +2155,11 @@ static void doKeepLinearInfo(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlo static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pResBlock) { int32_t rows = pResBlock->info.rows; - + blockDataEnsureCapacity(pResBlock, rows + 1); // todo set the correct primary timestamp column // output the result + bool hasInterp = true; for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) { SExprInfo* pExprInfo = &pExprSup->pExprInfo[j]; int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId; @@ -2169,7 +2171,6 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp switch (pSliceInfo->fillType) { case TSDB_FILL_NULL: { colDataAppendNULL(pDst, rows); - pResBlock->info.rows += 1; break; } @@ -2189,7 +2190,6 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp GET_TYPED_DATA(v, int64_t, pVar->nType, &pVar->i); colDataAppend(pDst, rows, (char*)&v, false); } - pResBlock->info.rows += 1; break; } @@ -2203,6 +2203,7 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp // before interp range, do not fill if (start.key == INT64_MIN || end.key == INT64_MAX) { + hasInterp = false; break; } @@ -2214,28 +2215,27 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp } taosMemoryFree(current.val); - pResBlock->info.rows += 1; break; } case TSDB_FILL_PREV: { if (!pSliceInfo->isPrevRowSet) { + hasInterp = false; break; } SGroupKeys* pkey = taosArrayGet(pSliceInfo->pPrevRow, srcSlot); colDataAppend(pDst, rows, pkey->pData, false); - pResBlock->info.rows += 1; break; } case TSDB_FILL_NEXT: { if (!pSliceInfo->isNextRowSet) { + hasInterp = false; break; } SGroupKeys* pkey = taosArrayGet(pSliceInfo->pNextRow, srcSlot); colDataAppend(pDst, rows, pkey->pData, false); - pResBlock->info.rows += 1; break; } @@ -2244,6 +2244,11 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp break; } } + + if (hasInterp) { + pResBlock->info.rows += 1; + } + } static int32_t initPrevRowsKeeper(STimeSliceOperatorInfo* pInfo, SSDataBlock* pBlock) { @@ -2388,7 +2393,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { int32_t code = initKeeperInfo(pSliceInfo, pBlock); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } // the pDataBlock are always the same one, no need to call this again @@ -2408,11 +2413,11 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { break; } } + } - if (pSliceInfo->current > pSliceInfo->win.ekey) { - doSetOperatorCompleted(pOperator); - break; - } + if (pSliceInfo->current > pSliceInfo->win.ekey) { + doSetOperatorCompleted(pOperator); + break; } if (ts == pSliceInfo->current) { @@ -2424,6 +2429,11 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot); SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot); + if (colDataIsNull_s(pSrc, i)) { + colDataAppendNULL(pDst, pResBlock->info.rows); + continue; + } + char* v = colDataGetData(pSrc, i); colDataAppend(pDst, pResBlock->info.rows, v, false); } @@ -2616,7 +2626,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { return pResBlock->info.rows == 0 ? NULL : pResBlock; } -void destroyTimeSliceOperatorInfo(void* param, int32_t numOfOutput) { +void destroyTimeSliceOperatorInfo(void* param) { STimeSliceOperatorInfo* pInfo = (STimeSliceOperatorInfo*)param; pInfo->pRes = blockDataDestroy(pInfo->pRes); @@ -2644,7 +2654,6 @@ void destroyTimeSliceOperatorInfo(void* param, int32_t numOfOutput) { taosMemoryFreeClear(param); } - SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo) { STimeSliceOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(STimeSliceOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); @@ -2704,20 +2713,26 @@ _error: return NULL; } -SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols, - SSDataBlock* pResBlock, STimeWindowAggSupp* pTwAggSup, int32_t tsSlotId, - SColumn* pStateKeyCol, SNode* pCondition, SExecTaskInfo* pTaskInfo) { +SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhysiNode* pStateNode, + SExecTaskInfo* pTaskInfo) { SStateWindowOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStateWindowOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { goto _error; } - pInfo->stateCol = *pStateKeyCol; + int32_t num = 0; + SExprInfo* pExprInfo = createExprInfo(pStateNode->window.pFuncs, NULL, &num); + SSDataBlock* pResBlock = createResDataBlock(pStateNode->window.node.pOutputDataBlockDesc); + int32_t tsSlotId = ((SColumnNode*)pStateNode->window.pTspk)->slotId; + + SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey)->pExpr; + + pInfo->stateCol = extractColumnFromColumnNode(pColNode); pInfo->stateKey.type = pInfo->stateCol.type; pInfo->stateKey.bytes = pInfo->stateCol.bytes; pInfo->stateKey.pData = taosMemoryCalloc(1, pInfo->stateCol.bytes); - pInfo->pCondition = pCondition; + pInfo->pCondition = pStateNode->window.node.pConditions; if (pInfo->stateKey.pData == NULL) { goto _error; } @@ -2725,12 +2740,15 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; initResultSizeInfo(&pOperator->resultInfo, 4096); - initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExpr, numOfCols, keyBufSize, pTaskInfo->id.str); - initBasicInfo(&pInfo->binfo, pResBlock); + int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + initBasicInfo(&pInfo->binfo, pResBlock); initResultRowInfo(&pInfo->binfo.resultRowInfo); - pInfo->twAggSup = *pTwAggSup; + pInfo->twAggSup = (STimeWindowAggSupp){.waterMark = pStateNode->window.watermark, .calTrigger = pStateNode->window.triggerType};; initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); pInfo->tsSlotId = tsSlotId; @@ -2738,26 +2756,33 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE; pOperator->blocking = true; pOperator->status = OP_NOT_OPENED; - pOperator->exprSupp.pExprInfo = pExpr; - pOperator->exprSupp.numOfExprs = numOfCols; pOperator->pTaskInfo = pTaskInfo; pOperator->info = pInfo; pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStateWindowAgg, NULL, NULL, destroyStateWindowOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL); - int32_t code = appendDownstream(pOperator, &downstream, 1); + code = appendDownstream(pOperator, &downstream, 1); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + return pOperator; _error: - pTaskInfo->code = TSDB_CODE_SUCCESS; + destroyStateWindowOperatorInfo(pInfo); + taosMemoryFreeClear(pOperator); + pTaskInfo->code = code; return NULL; } -void destroySWindowOperatorInfo(void* param, int32_t numOfOutput) { +void destroySWindowOperatorInfo(void* param) { SSessionAggOperatorInfo* pInfo = (SSessionAggOperatorInfo*)param; - cleanupBasicInfo(&pInfo->binfo); + if (pInfo == NULL) { + return; + } + cleanupBasicInfo(&pInfo->binfo); colDataDestroy(&pInfo->twAggSup.timeWindowData); cleanupAggSup(&pInfo->aggSup); @@ -2804,22 +2829,20 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionW pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION; pOperator->blocking = true; pOperator->status = OP_NOT_OPENED; - pOperator->exprSupp.pExprInfo = pExprInfo; - pOperator->exprSupp.numOfExprs = numOfCols; pOperator->info = pInfo; pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSessionWindowAgg, NULL, NULL, destroySWindowOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL); pOperator->pTaskInfo = pTaskInfo; - code = appendDownstream(pOperator, &downstream, 1); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + return pOperator; _error: - if (pInfo != NULL) { - destroySWindowOperatorInfo(pInfo, numOfCols); - } - + destroySWindowOperatorInfo(pInfo); taosMemoryFreeClear(pOperator); pTaskInfo->code = code; return NULL; @@ -2837,7 +2860,7 @@ void compactFunctions(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx, int3 if (code != TSDB_CODE_SUCCESS) { qError("%s apply functions error, code: %s", GET_TASKID(pTaskInfo), tstrerror(code)); pTaskInfo->code = code; - longjmp(pTaskInfo->env, code); + T_LONG_JMP(pTaskInfo->env, code); } } } @@ -2847,7 +2870,7 @@ bool hasIntervalWindow(SAggSupporter* pSup, TSKEY ts, uint64_t groupId) { int32_t bytes = sizeof(TSKEY); SET_RES_WINDOW_KEY(pSup->keyBuf, &ts, bytes, groupId); SResultRowPosition* p1 = - (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); + (SResultRowPosition*)tSimpleHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); return p1 != NULL; } @@ -2858,7 +2881,7 @@ static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SExpr return; } for (int32_t i = 0; i < size; i++) { - SWinRes* pWinRes = taosArrayGet(pWinArray, i); + SWinKey* pWinRes = taosArrayGet(pWinArray, i); SResultRow* pCurResult = NULL; STimeWindow ParentWin = {.skey = pWinRes->ts, .ekey = pWinRes->ts + 1}; setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &ParentWin, true, &pCurResult, pWinRes->groupId, pSup->pCtx, @@ -2888,7 +2911,7 @@ static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SExpr bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup) { SET_RES_WINDOW_KEY(pSup->keyBuf, &pWin->skey, sizeof(int64_t), groupId); - SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf, + SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(sizeof(int64_t))); return p1 == NULL; } @@ -2901,12 +2924,12 @@ int32_t getNexWindowPos(SInterval* pInterval, SDataBlockInfo* pBlockInfo, TSKEY* return getNextQualifiedWindow(pInterval, pNextWin, pBlockInfo, tsCols, prevEndPos, TSDB_ORDER_ASC); } -void addPullWindow(SHashObj* pMap, SWinRes* pWinRes, int32_t size) { +void addPullWindow(SHashObj* pMap, SWinKey* pWinRes, int32_t size) { SArray* childIds = taosArrayInit(8, sizeof(int32_t)); for (int32_t i = 0; i < size; i++) { taosArrayPush(childIds, &i); } - taosHashPut(pMap, pWinRes, sizeof(SWinRes), &childIds, sizeof(void*)); + taosHashPut(pMap, pWinRes, sizeof(SWinKey), &childIds, sizeof(void*)); } static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; } @@ -2953,11 +2976,11 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc } if (IS_FINAL_OP(pInfo) && isClosed && pInfo->pChildren) { bool ignore = true; - SWinRes winRes = { + SWinKey winRes = { .ts = nextWin.skey, .groupId = tableGroupId, }; - void* chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinRes)); + void* chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinKey)); if (isDeletedWindow(&nextWin, tableGroupId, &pInfo->aggSup) && !chIds) { SPullWindowInfo pull = {.window = nextWin, .groupId = tableGroupId}; // add pull data request @@ -2991,7 +3014,7 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc int32_t code = setTimeWindowOutputBuf(pResultRowInfo, &nextWin, true, &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } if (IS_FINAL_OP(pInfo)) { @@ -3005,8 +3028,8 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur); } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true); - doApplyFunctions(pTaskInfo, pSup->pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols, - pSDataBlock->info.rows, numOfOutput, TSDB_ORDER_ASC); + doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, + pSDataBlock->info.rows, numOfOutput); int32_t prevEndPos = (forwardRows - 1) * step + startPos; ASSERT(pSDataBlock->info.window.skey > 0 && pSDataBlock->info.window.ekey > 0); startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pSDataBlock->info, tsCols, prevEndPos, pInfo->order); @@ -3017,10 +3040,10 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc } static void clearStreamIntervalOperator(SStreamFinalIntervalOperatorInfo* pInfo) { - taosHashClear(pInfo->aggSup.pResultRowHashTable); + tSimpleHashClear(pInfo->aggSup.pResultRowHashTable); clearDiskbasedBuf(pInfo->aggSup.pResultBuf); - cleanupResultRowInfo(&pInfo->binfo.resultRowInfo); initResultRowInfo(&pInfo->binfo.resultRowInfo); + pInfo->aggSup.currentPageId = -1; } static void clearSpecialDataBlock(SSDataBlock* pBlock) { @@ -3086,8 +3109,8 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) { uint64_t* groupIdData = (uint64_t*)pGroupCol->pData; int32_t chId = getChildIndex(pBlock); for (int32_t i = 0; i < pBlock->info.rows; i++) { - SWinRes winRes = {.ts = tsData[i], .groupId = groupIdData[i]}; - void* chIds = taosHashGet(pMap, &winRes, sizeof(SWinRes)); + SWinKey winRes = {.ts = tsData[i], .groupId = groupIdData[i]}; + void* chIds = taosHashGet(pMap, &winRes, sizeof(SWinKey)); if (chIds) { SArray* chArray = *(SArray**)chIds; int32_t index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ); @@ -3096,7 +3119,7 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) { taosArrayRemove(chArray, index); if (taosArrayGetSize(chArray) == 0) { // pull data is over - taosHashRemove(pMap, &winRes, sizeof(SWinRes)); + taosHashRemove(pMap, &winRes, sizeof(SWinKey)); } } } @@ -3105,11 +3128,12 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) { static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { SStreamFinalIntervalOperatorInfo* pInfo = pOperator->info; - SOperatorInfo* downstream = pOperator->pDownstream[0]; - SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); - _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP); - SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK); - TSKEY maxTs = INT64_MIN; + + SOperatorInfo* downstream = pOperator->pDownstream[0]; + SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK); + TSKEY maxTs = INT64_MIN; SExprSupp* pSup = &pOperator->exprSupp; @@ -3175,11 +3199,11 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { maxTs = TMAX(maxTs, pBlock->info.window.ekey); maxTs = TMAX(maxTs, pBlock->info.watermark); - if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA || - pBlock->info.type == STREAM_INVALID) { + ASSERT(pBlock->info.type != STREAM_INVERT); + if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA) { pInfo->binfo.pRes->info.type = pBlock->info.type; } else if (pBlock->info.type == STREAM_CLEAR) { - SArray* pUpWins = taosArrayInit(8, sizeof(SWinRes)); + SArray* pUpWins = taosArrayInit(8, sizeof(SWinKey)); doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock, pUpWins); if (IS_FINAL_OP(pInfo)) { int32_t childIndex = getChildIndex(pBlock); @@ -3217,7 +3241,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap); continue; } else if (pBlock->info.type == STREAM_RETRIEVE && !IS_FINAL_OP(pInfo)) { - SArray* pUpWins = taosArrayInit(8, sizeof(SWinRes)); + SArray* pUpWins = taosArrayInit(8, sizeof(SWinKey)); doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock, pUpWins); removeResults(pUpWins, pUpdatedMap); taosArrayDestroy(pUpWins); @@ -3243,7 +3267,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { for (int32_t i = 0; i < chIndex + 1 - size; i++) { SOperatorInfo* pChildOp = createStreamFinalIntervalOperatorInfo(NULL, pInfo->pPhyNode, pOperator->pTaskInfo, 0); if (!pChildOp) { - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } SStreamFinalIntervalOperatorInfo* pTmpInfo = pChildOp->info; pTmpInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE; @@ -3382,14 +3406,16 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc); int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + + initStreamFunciton(pOperator->exprSupp.pCtx, pOperator->exprSupp.numOfExprs); initBasicInfo(&pInfo->binfo, pResBlock); ASSERT(numOfCols > 0); - increaseTs(pOperator->exprSupp.pCtx); initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); - if (code != TSDB_CODE_SUCCESS) { - goto _error; - } + initResultRowInfo(&pInfo->binfo.resultRowInfo); pInfo->pChildren = NULL; if (numOfChild > 0) { @@ -3418,6 +3444,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, // semi interval operator does not catch result pInfo->isFinal = false; pOperator->name = "StreamSemiIntervalOperator"; + ASSERT(pInfo->aggSup.currentPageId == -1); } if (!IS_FINAL_OP(pInfo) || numOfChild == 0) { @@ -3431,14 +3458,12 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, pInfo->ignoreExpiredData = pIntervalPhyNode->window.igExpired; pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT); pInfo->delIndex = 0; - pInfo->pDelWins = taosArrayInit(4, sizeof(SWinRes)); + pInfo->pDelWins = taosArrayInit(4, sizeof(SWinKey)); pInfo->pRecycledPages = taosArrayInit(4, sizeof(int32_t)); pOperator->operatorType = pPhyNode->type; pOperator->blocking = true; pOperator->status = OP_NOT_OPENED; - pOperator->exprSupp.pExprInfo = pExprInfo; - pOperator->exprSupp.numOfExprs = numOfCols; pOperator->info = pInfo; pOperator->fpSet = @@ -3455,7 +3480,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, return pOperator; _error: - destroyStreamFinalIntervalOperatorInfo(pInfo, numOfCols); + destroyStreamFinalIntervalOperatorInfo(pInfo); taosMemoryFreeClear(pOperator); pTaskInfo->code = code; return NULL; @@ -3493,7 +3518,7 @@ void destroyStateStreamAggSupporter(SStreamAggSupporter* pSup) { blockDataDestroy(pSup->pScanBlock); } -void destroyStreamSessionAggOperatorInfo(void* param, int32_t numOfOutput) { +void destroyStreamSessionAggOperatorInfo(void* param) { SStreamSessionAggOperatorInfo* pInfo = (SStreamSessionAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); destroyStreamAggSupporter(&pInfo->streamAggSup); @@ -3503,7 +3528,7 @@ void destroyStreamSessionAggOperatorInfo(void* param, int32_t numOfOutput) { for (int32_t i = 0; i < size; i++) { SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i); SStreamSessionAggOperatorInfo* pChInfo = pChild->info; - destroyStreamSessionAggOperatorInfo(pChInfo, numOfOutput); + destroyStreamSessionAggOperatorInfo(pChInfo); taosMemoryFreeClear(pChild); } } @@ -3523,15 +3548,15 @@ int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo* if (code != TSDB_CODE_SUCCESS) { return code; } + initStreamFunciton(pSup->pCtx, pSup->numOfExprs); initBasicInfo(pBasicInfo, pResultBlock); for (int32_t i = 0; i < numOfCols; ++i) { - pSup->pCtx[i].pBuf = NULL; + pSup->pCtx[i].saveHandle.pBuf = NULL; } ASSERT(numOfCols > 0); - increaseTs(pSup->pCtx); return TSDB_CODE_SUCCESS; } @@ -3542,7 +3567,7 @@ void initDummyFunction(SqlFunctionCtx* pDummy, SqlFunctionCtx* pCtx, int32_t num } void initDownStream(SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, int64_t gap, int64_t waterMark, - uint8_t type) { + uint16_t type) { ASSERT(downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN); SStreamScanInfo* pScanInfo = downstream->info; pScanInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = pAggSup, .gap = gap, .parentType = type}; @@ -3573,7 +3598,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh if (pSessionNode->window.pExprs != NULL) { int32_t numOfScalar = 0; SExprInfo* pScalarExprInfo = createExprInfo(pSessionNode->window.pExprs, NULL, &numOfScalar); - int32_t code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar); + code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -3623,8 +3648,6 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION; pOperator->blocking = true; pOperator->status = OP_NOT_OPENED; - pOperator->exprSupp.pExprInfo = pExprInfo; - pOperator->exprSupp.numOfExprs = numOfCols; pOperator->info = pInfo; pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamSessionAgg, NULL, NULL, destroyStreamSessionAggOperatorInfo, @@ -3637,7 +3660,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh _error: if (pInfo != NULL) { - destroyStreamSessionAggOperatorInfo(pInfo, numOfCols); + destroyStreamSessionAggOperatorInfo(pInfo); } taosMemoryFreeClear(pOperator); @@ -3765,8 +3788,8 @@ int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TS } if (pWinInfo->win.skey > pStartTs[i]) { if (pStDeleted && pWinInfo->isOutput) { - SWinRes res = {.ts = pWinInfo->win.skey, .groupId = groupId}; - taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes)); + SWinKey res = {.ts = pWinInfo->win.skey, .groupId = groupId}; + taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinKey)); pWinInfo->isOutput = false; } pWinInfo->win.skey = pStartTs[i]; @@ -3786,11 +3809,11 @@ static int32_t setWindowOutputBuf(SResultWindowInfo* pWinInfo, SResultRow** pRes // too many time window in query int32_t size = taosArrayGetSize(pAggSup->pCurWins); if (pTaskInfo->execModel == OPTR_EXEC_MODEL_BATCH && size > MAX_INTERVAL_TIME_WINDOW) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW); } if (pWinInfo->pos.pageId == -1) { - *pResult = getNewResultRow(pAggSup->pResultBuf, groupId, pAggSup->resultRowSize); + *pResult = getNewResultRow(pAggSup->pResultBuf, &pAggSup->currentPageId, pAggSup->resultRowSize); if (*pResult == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -3828,8 +3851,7 @@ static int32_t doOneWindowAggImpl(int32_t tsColId, SOptrBasicInfo* pBinfo, SStre return TSDB_CODE_QRY_OUT_OF_MEMORY; } updateTimeWindowInfo(pTimeWindowData, &pCurWin->win, false); - doApplyFunctions(pTaskInfo, pSup->pCtx, &pCurWin->win, pTimeWindowData, startIndex, winRows, tsCols, - pSDataBlock->info.rows, numOutput, TSDB_ORDER_ASC); + doApplyFunctions(pTaskInfo, pSup->pCtx, pTimeWindowData, startIndex, winRows, pSDataBlock->info.rows, numOutput); SFilePage* bufPage = getBufPage(pAggSup->pResultBuf, pCurWin->pos.pageId); setBufPageDirty(bufPage, true); releaseBufPage(pAggSup->pResultBuf, bufPage); @@ -3885,8 +3907,8 @@ void compactTimeWindow(SStreamSessionAggOperatorInfo* pInfo, int32_t startIndex, compactFunctions(pSup->pCtx, pInfo->pDummyCtx, numOfOutput, pTaskInfo); taosHashRemove(pStUpdated, &pWinInfo->pos, sizeof(SResultRowPosition)); if (pWinInfo->isOutput) { - SWinRes res = {.ts = pWinInfo->win.skey, .groupId = groupId}; - taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes)); + SWinKey res = {.ts = pWinInfo->win.skey, .groupId = groupId}; + taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinKey)); pWinInfo->isOutput = false; } taosArrayRemove(pInfo->streamAggSup.pCurWins, i); @@ -3939,7 +3961,7 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData pStDeleted); code = doOneWindowAgg(pInfo, pSDataBlock, pCurWin, &pResult, i, winRows, numOfOutput, pOperator); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } int32_t winNum = getNumCompactWindow(pAggSup->pCurWins, winIndex, gap); @@ -3948,10 +3970,10 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData } pCurWin->isClosed = false; if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pStUpdated) { - SWinRes value = {.ts = pCurWin->win.skey, .groupId = groupId}; - code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &value, sizeof(SWinRes)); + SWinKey value = {.ts = pCurWin->win.skey, .groupId = groupId}; + code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &value, sizeof(SWinKey)); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } pCurWin->isOutput = true; } @@ -3996,11 +4018,12 @@ static void doClearSessionWindows(SStreamAggSupporter* pAggSup, SExprSupp* pSup, int32_t numOfOutput, int64_t gap, SArray* result) { SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, tsIndex); TSKEY* tsCols = (TSKEY*)pColDataInfo->pData; + SColumnInfoData* pGpDataInfo = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX); + uint64_t* gpCols = (uint64_t*)pGpDataInfo->pData; int32_t step = 0; for (int32_t i = 0; i < pBlock->info.rows; i += step) { int32_t winIndex = 0; - SResultWindowInfo* pCurWin = - getCurSessionWindow(pAggSup, tsCols[i], INT64_MIN, pBlock->info.groupId, gap, &winIndex); + SResultWindowInfo* pCurWin = getCurSessionWindow(pAggSup, tsCols[i], INT64_MIN, gpCols[i], gap, &winIndex); if (!pCurWin || pCurWin->pos.pageId == -1) { // window has been closed. step = 1; @@ -4025,9 +4048,9 @@ static int32_t copyUpdateResult(SHashObj* pStUpdated, SArray* pUpdated) { if (pos == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; } - pos->groupId = ((SWinRes*)pData)->groupId; + pos->groupId = ((SWinKey*)pData)->groupId; pos->pos = *(SResultRowPosition*)key; - *(int64_t*)pos->key = ((SWinRes*)pData)->ts; + *(int64_t*)pos->key = ((SWinKey*)pData)->ts; taosArrayPush(pUpdated, &pos); } taosArraySort(pUpdated, resultrowComparAsc); @@ -4043,7 +4066,7 @@ void doBuildDeleteDataBlock(SHashObj* pStDeleted, SSDataBlock* pBlock, void** It blockDataEnsureCapacity(pBlock, size); size_t keyLen = 0; while (((*Ite) = taosHashIterate(pStDeleted, *Ite)) != NULL) { - SWinRes* res = *Ite; + SWinKey* res = *Ite; SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX); colDataAppend(pTsCol, pBlock->info.rows, (const char*)&res->ts, false); SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX); @@ -4174,8 +4197,8 @@ static void copyDeleteWindowInfo(SArray* pResWins, SHashObj* pStDeleted) { int32_t size = taosArrayGetSize(pResWins); for (int32_t i = 0; i < size; i++) { SResultWindowInfo* pWinInfo = taosArrayGet(pResWins, i); - SWinRes res = {.ts = pWinInfo->win.skey, .groupId = pWinInfo->groupId}; - taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes)); + SWinKey res = {.ts = pWinInfo->win.skey, .groupId = pWinInfo->groupId}; + taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinKey)); } } @@ -4213,14 +4236,14 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { if (pBlock->info.type == STREAM_CLEAR) { SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo)); - doClearSessionWindows(&pInfo->streamAggSup, &pOperator->exprSupp, pBlock, 0, pOperator->exprSupp.numOfExprs, 0, - pWins); + doClearSessionWindows(&pInfo->streamAggSup, &pOperator->exprSupp, pBlock, START_TS_COLUMN_INDEX, + pOperator->exprSupp.numOfExprs, 0, pWins); if (IS_FINAL_OP(pInfo)) { int32_t childIndex = getChildIndex(pBlock); SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex); SStreamSessionAggOperatorInfo* pChildInfo = pChildOp->info; - doClearSessionWindows(&pChildInfo->streamAggSup, &pChildOp->exprSupp, pBlock, 0, pChildOp->exprSupp.numOfExprs, - 0, NULL); + doClearSessionWindows(&pChildInfo->streamAggSup, &pChildOp->exprSupp, pBlock, START_TS_COLUMN_INDEX, + pChildOp->exprSupp.numOfExprs, 0, NULL); rebuildTimeWindow(pInfo, pWins, pBlock->info.groupId, pOperator->exprSupp.numOfExprs, pOperator); } taosArrayDestroy(pWins); @@ -4260,7 +4283,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { SOperatorInfo* pChildOp = createStreamFinalSessionAggOperatorInfo(NULL, pInfo->pPhyNode, pOperator->pTaskInfo, 0); if (!pChildOp) { - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } taosArrayPush(pInfo->pChildren, &pChildOp); } @@ -4307,8 +4330,7 @@ static void clearStreamSessionOperator(SStreamSessionAggOperatorInfo* pInfo) { } } clearDiskbasedBuf(pInfo->streamAggSup.pResultBuf); - cleanupResultRowInfo(&pInfo->binfo.resultRowInfo); - initResultRowInfo(&pInfo->binfo.resultRowInfo); + pInfo->streamAggSup.currentPageId = -1; } static void removeSessionResults(SHashObj* pHashMap, SArray* pWins) { @@ -4330,21 +4352,21 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) { } else if (pOperator->status == OP_RES_TO_RETURN) { doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf); if (pBInfo->pRes->info.rows > 0) { - printDataBlock(pBInfo->pRes, "sems session"); + printDataBlock(pBInfo->pRes, "semi session"); return pBInfo->pRes; } // doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator); if (pInfo->pDelRes->info.rows > 0 && !pInfo->returnDelete) { pInfo->returnDelete = true; - printDataBlock(pInfo->pDelRes, "sems session"); + printDataBlock(pInfo->pDelRes, "semi session"); return pInfo->pDelRes; } if (pInfo->pUpdateRes->info.rows > 0) { // process the rest of the data pOperator->status = OP_OPENED; - printDataBlock(pInfo->pUpdateRes, "sems session"); + printDataBlock(pInfo->pUpdateRes, "semi session"); return pInfo->pUpdateRes; } // semi interval operator clear disk buffer @@ -4363,13 +4385,14 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) { clearSpecialDataBlock(pInfo->pUpdateRes); break; } + printDataBlock(pBlock, "semi session recv"); if (pBlock->info.type == STREAM_CLEAR) { SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo)); - doClearSessionWindows(&pInfo->streamAggSup, pSup, pBlock, 0, pSup->numOfExprs, 0, pWins); + doClearSessionWindows(&pInfo->streamAggSup, pSup, pBlock, START_TS_COLUMN_INDEX, pSup->numOfExprs, 0, pWins); removeSessionResults(pStUpdated, pWins); taosArrayDestroy(pWins); - copyUpdateDataBlock(pInfo->pUpdateRes, pBlock, pInfo->primaryTsIndex); + copyDataBlock(pInfo->pUpdateRes, pBlock); break; } else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) { // gap must be 0 @@ -4409,21 +4432,21 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) { doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf); if (pBInfo->pRes->info.rows > 0) { - printDataBlock(pBInfo->pRes, "sems session"); + printDataBlock(pBInfo->pRes, "semi session"); return pBInfo->pRes; } // doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator); if (pInfo->pDelRes->info.rows > 0 && !pInfo->returnDelete) { pInfo->returnDelete = true; - printDataBlock(pInfo->pDelRes, "sems session"); + printDataBlock(pInfo->pDelRes, "semi session"); return pInfo->pDelRes; } if (pInfo->pUpdateRes->info.rows > 0) { // process the rest of the data pOperator->status = OP_OPENED; - printDataBlock(pInfo->pUpdateRes, "sems session"); + printDataBlock(pInfo->pUpdateRes, "semi session"); return pInfo->pUpdateRes; } @@ -4445,8 +4468,7 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream pOperator->name = "StreamSessionFinalAggOperator"; } else { pInfo->isFinal = false; - pInfo->pUpdateRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc); - pInfo->pUpdateRes->info.type = STREAM_CLEAR; + pInfo->pUpdateRes = createSpecialDataBlock(STREAM_CLEAR); blockDataEnsureCapacity(pInfo->pUpdateRes, 128); pOperator->name = "StreamSessionSemiAggOperator"; pOperator->fpSet = @@ -4468,7 +4490,7 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream _error: if (pInfo != NULL) { - destroyStreamSessionAggOperatorInfo(pInfo, pOperator->exprSupp.numOfExprs); + destroyStreamSessionAggOperatorInfo(pInfo); } taosMemoryFreeClear(pOperator); @@ -4476,7 +4498,7 @@ _error: return NULL; } -void destroyStreamStateOperatorInfo(void* param, int32_t numOfOutput) { +void destroyStreamStateOperatorInfo(void* param) { SStreamStateAggOperatorInfo* pInfo = (SStreamStateAggOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); destroyStateStreamAggSupporter(&pInfo->streamAggSup); @@ -4486,7 +4508,7 @@ void destroyStreamStateOperatorInfo(void* param, int32_t numOfOutput) { for (int32_t i = 0; i < size; i++) { SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i); SStreamSessionAggOperatorInfo* pChInfo = pChild->info; - destroyStreamSessionAggOperatorInfo(pChInfo, numOfOutput); + destroyStreamSessionAggOperatorInfo(pChInfo); taosMemoryFreeClear(pChild); taosMemoryFreeClear(pChInfo); } @@ -4623,8 +4645,9 @@ SStateWindowInfo* getStateWindow(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_ return insertNewStateWindow(pWinInfos, ts, pKeyData, index + 1, pCol); } -int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, SColumnInfoData* pKeyCol, int32_t rows, - int32_t start, bool* allEqual, SHashObj* pSeDelete) { +int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, uint64_t groupId, + SColumnInfoData* pKeyCol, int32_t rows, int32_t start, bool* allEqual, + SHashObj* pSeDeleted) { *allEqual = true; SStateWindowInfo* pWinInfo = taosArrayGet(pWinInfos, winIndex); for (int32_t i = start; i < rows; ++i) { @@ -4644,9 +4667,9 @@ int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, S } } if (pWinInfo->winInfo.win.skey > pTs[i]) { - if (pSeDelete && pWinInfo->winInfo.isOutput) { - taosHashPut(pSeDelete, &pWinInfo->winInfo.pos, sizeof(SResultRowPosition), &pWinInfo->winInfo.win.skey, - sizeof(TSKEY)); + if (pSeDeleted && pWinInfo->winInfo.isOutput) { + SWinKey res = {.ts = pWinInfo->winInfo.win.skey, .groupId = groupId}; + taosHashPut(pSeDeleted, &pWinInfo->winInfo.pos, sizeof(SResultRowPosition), &res, sizeof(SWinKey)); pWinInfo->winInfo.isOutput = false; } pWinInfo->winInfo.win.skey = pTs[i]; @@ -4659,23 +4682,21 @@ int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, S return rows - start; } -static void doClearStateWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, int32_t tsIndex, SColumn* pCol, - int32_t keyIndex, SHashObj* pSeUpdated, SHashObj* pSeDeleted) { - SColumnInfoData* pTsColInfo = taosArrayGet(pBlock->pDataBlock, tsIndex); - SColumnInfoData* pKeyColInfo = taosArrayGet(pBlock->pDataBlock, keyIndex); +static void doClearStateWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SHashObj* pSeUpdated, + SHashObj* pSeDeleted) { + SColumnInfoData* pTsColInfo = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX); + SColumnInfoData* pGroupColInfo = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX); TSKEY* tsCol = (TSKEY*)pTsColInfo->pData; bool allEqual = false; int32_t step = 1; + uint64_t* gpCol = (uint64_t*)pGroupColInfo->pData; for (int32_t i = 0; i < pBlock->info.rows; i += step) { - char* pKeyData = colDataGetData(pKeyColInfo, i); int32_t winIndex = 0; - SStateWindowInfo* pCurWin = getStateWindowByTs(pAggSup, tsCol[i], pBlock->info.groupId, &winIndex); + SStateWindowInfo* pCurWin = getStateWindowByTs(pAggSup, tsCol[i], gpCol[i], &winIndex); if (!pCurWin) { continue; } - step = updateStateWindowInfo(pAggSup->pCurWins, winIndex, tsCol, pKeyColInfo, pBlock->info.rows, i, &allEqual, - pSeDeleted); - ASSERT(isTsInWindow(pCurWin, tsCol[i]) || isEqualStateKey(pCurWin, pKeyData)); + updateSessionWindowInfo(&pCurWin->winInfo, tsCol, NULL, 0, pBlock->info.rows, i, 0, NULL); taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition)); deleteWindow(pAggSup->pCurWins, winIndex, destroyStateWinInfo); } @@ -4712,27 +4733,26 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl char* pKeyData = colDataGetData(pKeyColInfo, i); int32_t winIndex = 0; bool allEqual = true; - SStateWindowInfo* pCurWin = - getStateWindow(pAggSup, tsCols[i], pSDataBlock->info.groupId, pKeyData, &pInfo->stateCol, &winIndex); - winRows = updateStateWindowInfo(pAggSup->pCurWins, winIndex, tsCols, pKeyColInfo, pSDataBlock->info.rows, i, - &allEqual, pInfo->pSeDeleted); + SStateWindowInfo* pCurWin = getStateWindow(pAggSup, tsCols[i], groupId, pKeyData, &pInfo->stateCol, &winIndex); + winRows = updateStateWindowInfo(pAggSup->pCurWins, winIndex, tsCols, groupId, pKeyColInfo, pSDataBlock->info.rows, + i, &allEqual, pStDeleted); if (!allEqual) { - appendOneRow(pAggSup->pScanBlock, &pCurWin->winInfo.win.skey, &pCurWin->winInfo.win.ekey, - &pSDataBlock->info.groupId); + appendOneRow(pAggSup->pScanBlock, &pCurWin->winInfo.win.skey, &pCurWin->winInfo.win.ekey, GROUPID_COLUMN_INDEX, + &groupId); taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition)); deleteWindow(pAggSup->pCurWins, winIndex, destroyStateWinInfo); continue; } code = doOneStateWindowAgg(pInfo, pSDataBlock, &pCurWin->winInfo, &pResult, i, winRows, numOfOutput, pOperator); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } pCurWin->winInfo.isClosed = false; if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) { - SWinRes value = {.ts = pCurWin->winInfo.win.skey, .groupId = groupId}; - code = taosHashPut(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition), &value, sizeof(SWinRes)); + SWinKey value = {.ts = pCurWin->winInfo.win.skey, .groupId = groupId}; + code = taosHashPut(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition), &value, sizeof(SWinKey)); if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } pCurWin->winInfo.isOutput = true; } @@ -4773,8 +4793,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) { printDataBlock(pBlock, "single state recv"); if (pBlock->info.type == STREAM_CLEAR) { - doClearStateWindows(&pInfo->streamAggSup, pBlock, pInfo->primaryTsIndex, &pInfo->stateCol, pInfo->stateCol.slotId, - pSeUpdated, pInfo->pSeDeleted); + doClearStateWindows(&pInfo->streamAggSup, pBlock, pSeUpdated, pInfo->pSeDeleted); continue; } else if (pBlock->info.type == STREAM_DELETE_DATA) { SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo)); @@ -4882,9 +4901,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pInfo->pSeDeleted = taosHashInit(64, hashFn, true, HASH_NO_LOCK); pInfo->pDelIterator = NULL; - // pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT); - pInfo->pDelRes = createOneDataBlock(pInfo->binfo.pRes, false); // todo(liuyao) for delete - pInfo->pDelRes->info.type = STREAM_DELETE_RESULT; // todo(liuyao) for delete + pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT); pInfo->pChildren = NULL; pInfo->ignoreExpiredData = pStateNode->window.igExpired; @@ -4892,8 +4909,6 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE; pOperator->blocking = true; pOperator->status = OP_NOT_OPENED; - pOperator->exprSupp.numOfExprs = numOfCols; - pOperator->exprSupp.pExprInfo = pExprInfo; pOperator->pTaskInfo = pTaskInfo; pOperator->info = pInfo; pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamStateAgg, NULL, NULL, @@ -4906,16 +4921,15 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys return pOperator; _error: - destroyStreamStateOperatorInfo(pInfo, numOfCols); + destroyStreamStateOperatorInfo(pInfo); taosMemoryFreeClear(pOperator); pTaskInfo->code = code; return NULL; } -void destroyMergeAlignedIntervalOperatorInfo(void* param, int32_t numOfOutput) { +void destroyMergeAlignedIntervalOperatorInfo(void* param) { SMergeAlignedIntervalAggOperatorInfo* miaInfo = (SMergeAlignedIntervalAggOperatorInfo*)param; - destroyIntervalOperatorInfo(miaInfo->intervalAggOperatorInfo, numOfOutput); - + destroyIntervalOperatorInfo(miaInfo->intervalAggOperatorInfo); taosMemoryFreeClear(param); } @@ -4928,14 +4942,14 @@ static int32_t outputMergeAlignedIntervalResult(SOperatorInfo* pOperatorInfo, ui SExprSupp* pSup = &pOperatorInfo->exprSupp; SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &wstartTs, TSDB_KEYSIZE, tableGroupId); - SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, + SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)); ASSERT(p1 != NULL); finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pSup->pCtx, pSup->pExprInfo, pSup->numOfExprs, pSup->rowEntryInfoOffset, pResultBlock, pTaskInfo); - taosHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)); - ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0); + tSimpleHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)); + ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0); return TSDB_CODE_SUCCESS; } @@ -4958,7 +4972,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR // there is an result exists if (miaInfo->curTs != INT64_MIN) { - ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1); + ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1); if (ts != miaInfo->curTs) { outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, miaInfo->curTs); @@ -4966,7 +4980,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR } } else { miaInfo->curTs = ts; - ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0); + ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0); } STimeWindow win = {0}; @@ -4978,7 +4992,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } int32_t currPos = startPos; @@ -4990,8 +5004,8 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR } updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true); - doApplyFunctions(pTaskInfo, pSup->pCtx, &currWin, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos, - tsCols, pBlock->info.rows, numOfOutput, iaInfo->inputOrder); + doApplyFunctions(pTaskInfo, pSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos, + pBlock->info.rows, numOfOutput); outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, miaInfo->curTs); miaInfo->curTs = tsCols[currPos]; @@ -5005,15 +5019,15 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR ret = setTimeWindowOutputBuf(pResultRowInfo, &currWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } miaInfo->curTs = currWin.skey; } updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true); - doApplyFunctions(pTaskInfo, pSup->pCtx, &currWin, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos, - tsCols, pBlock->info.rows, numOfOutput, iaInfo->inputOrder); + doApplyFunctions(pTaskInfo, pSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos, + pBlock->info.rows, numOfOutput); } static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { @@ -5042,7 +5056,7 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { if (pBlock == NULL) { // close last unfinalized time window if (miaInfo->curTs != INT64_MIN) { - ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1); + ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1); outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs); miaInfo->curTs = INT64_MIN; } @@ -5110,9 +5124,7 @@ static SSDataBlock* mergeAlignedIntervalAgg(SOperatorInfo* pOperator) { return (rows == 0) ? NULL : pRes; } -SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, - int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, - int32_t primaryTsSlotId, SNode* pCondition, bool mergeResultBlock, +SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SMergeAlignedIntervalPhysiNode* pNode, SExecTaskInfo* pTaskInfo) { SMergeAlignedIntervalAggOperatorInfo* miaInfo = taosMemoryCalloc(1, sizeof(SMergeAlignedIntervalAggOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); @@ -5125,48 +5137,54 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, goto _error; } + int32_t num = 0; + SExprInfo* pExprInfo = createExprInfo(pNode->window.pFuncs, NULL, &num); + SSDataBlock* pResBlock = createResDataBlock(pNode->window.node.pOutputDataBlockDesc); + + SInterval interval = {.interval = pNode->interval, + .sliding = pNode->sliding, + .intervalUnit = pNode->intervalUnit, + .slidingUnit = pNode->slidingUnit, + .offset = pNode->offset, + .precision = ((SColumnNode*)pNode->window.pTspk)->node.resType.precision}; + SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo; SExprSupp* pSup = &pOperator->exprSupp; - miaInfo->pCondition = pCondition; - miaInfo->curTs = INT64_MIN; - - iaInfo->win = pTaskInfo->window; - iaInfo->inputOrder = TSDB_ORDER_ASC; - iaInfo->interval = *pInterval; - iaInfo->execModel = pTaskInfo->execModel; - iaInfo->primaryTsIndex = primaryTsSlotId; - iaInfo->binfo.mergeResultBlock = mergeResultBlock; + miaInfo->pCondition = pNode->window.node.pConditions; + miaInfo->curTs = INT64_MIN; + iaInfo->win = pTaskInfo->window; + iaInfo->inputOrder = TSDB_ORDER_ASC; + iaInfo->interval = interval; + iaInfo->execModel = pTaskInfo->execModel; + iaInfo->primaryTsIndex = ((SColumnNode*)pNode->window.pTspk)->slotId; + iaInfo->binfo.mergeResultBlock = pNode->window.mergeDataBlock; size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; initResultSizeInfo(&pOperator->resultInfo, 4096); - int32_t code = - initAggInfo(&pOperator->exprSupp, &iaInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str); - initBasicInfo(&iaInfo->binfo, pResBlock); + int32_t code = initAggInfo(&pOperator->exprSupp, &iaInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + initBasicInfo(&iaInfo->binfo, pResBlock); initExecTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &iaInfo->win); - iaInfo->timeWindowInterpo = timeWindowinterpNeeded(pSup->pCtx, numOfCols, iaInfo); + iaInfo->timeWindowInterpo = timeWindowinterpNeeded(pSup->pCtx, num, iaInfo); if (iaInfo->timeWindowInterpo) { - iaInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SResultRowPosition)); - } - - if (code != TSDB_CODE_SUCCESS) { - goto _error; + iaInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SOpenWindowInfo)); } initResultRowInfo(&iaInfo->binfo.resultRowInfo); blockDataEnsureCapacity(iaInfo->binfo.pRes, pOperator->resultInfo.capacity); - pOperator->name = "TimeMergeAlignedIntervalAggOperator"; + pOperator->name = "TimeMergeAlignedIntervalAggOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->exprSupp.pExprInfo = pExprInfo; - pOperator->pTaskInfo = pTaskInfo; - pOperator->exprSupp.numOfExprs = numOfCols; - pOperator->info = miaInfo; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->pTaskInfo = pTaskInfo; + pOperator->info = miaInfo; pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, mergeAlignedIntervalAgg, NULL, NULL, destroyMergeAlignedIntervalOperatorInfo, NULL, NULL, NULL); @@ -5179,7 +5197,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, return pOperator; _error: - destroyMergeAlignedIntervalOperatorInfo(miaInfo, numOfCols); + destroyMergeAlignedIntervalOperatorInfo(miaInfo); taosMemoryFreeClear(pOperator); pTaskInfo->code = code; return NULL; @@ -5202,10 +5220,10 @@ typedef struct SGroupTimeWindow { STimeWindow window; } SGroupTimeWindow; -void destroyMergeIntervalOperatorInfo(void* param, int32_t numOfOutput) { +void destroyMergeIntervalOperatorInfo(void* param) { SMergeIntervalAggOperatorInfo* miaInfo = (SMergeIntervalAggOperatorInfo*)param; tdListFree(miaInfo->groupIntervals); - destroyIntervalOperatorInfo(&miaInfo->intervalAggOperatorInfo, numOfOutput); + destroyIntervalOperatorInfo(&miaInfo->intervalAggOperatorInfo); taosMemoryFreeClear(param); } @@ -5219,12 +5237,12 @@ static int32_t finalizeWindowResult(SOperatorInfo* pOperatorInfo, uint64_t table SExprSupp* pExprSup = &pOperatorInfo->exprSupp; SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &win->skey, TSDB_KEYSIZE, tableGroupId); - SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, + SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)); ASSERT(p1 != NULL); finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pExprSup->pCtx, pExprSup->pExprInfo, pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset, pResultBlock, pTaskInfo); - taosHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)); + tSimpleHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE)); return TSDB_CODE_SUCCESS; } @@ -5280,7 +5298,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pExprSup->pCtx, numOfOutput, pExprSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } TSKEY ekey = ascScan ? win.ekey : win.skey; @@ -5290,14 +5308,14 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* // prev time window not interpolation yet. if (iaInfo->timeWindowInterpo) { - SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult); + SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult, tableGroupId); doInterpUnclosedTimeWindow(pOperatorInfo, numOfOutput, pResultRowInfo, pBlock, scanFlag, tsCols, &pos); // restore current time window ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pExprSup->pCtx, numOfOutput, pExprSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } // window start key interpolation @@ -5305,8 +5323,8 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* } updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &win, true); - doApplyFunctions(pTaskInfo, pExprSup->pCtx, &win, &iaInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols, - pBlock->info.rows, numOfOutput, iaInfo->inputOrder); + doApplyFunctions(pTaskInfo, pExprSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, forwardRows, + pBlock->info.rows, numOfOutput); doCloseWindow(pResultRowInfo, iaInfo, pResult); // output previous interval results after this interval (&win) is closed @@ -5326,7 +5344,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* setTimeWindowOutputBuf(pResultRowInfo, &nextWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pExprSup->pCtx, numOfOutput, pExprSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } ekey = ascScan ? nextWin.ekey : nextWin.skey; @@ -5337,8 +5355,8 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* doWindowBorderInterpolation(iaInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pExprSup); updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &nextWin, true); - doApplyFunctions(pTaskInfo, pExprSup->pCtx, &nextWin, &iaInfo->twAggSup.timeWindowData, startPos, forwardRows, - tsCols, pBlock->info.rows, numOfOutput, iaInfo->inputOrder); + doApplyFunctions(pTaskInfo, pExprSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, forwardRows, + pBlock->info.rows, numOfOutput); doCloseWindow(pResultRowInfo, iaInfo, pResult); // output previous interval results after this interval (&nextWin) is closed @@ -5423,54 +5441,65 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) { return (rows == 0) ? NULL : pRes; } -SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, - SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, - bool mergeBlock, SExecTaskInfo* pTaskInfo) { - SMergeIntervalAggOperatorInfo* miaInfo = taosMemoryCalloc(1, sizeof(SMergeIntervalAggOperatorInfo)); +SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMergeIntervalPhysiNode* pIntervalPhyNode, + SExecTaskInfo* pTaskInfo) { + SMergeIntervalAggOperatorInfo* pMergeIntervalInfo = taosMemoryCalloc(1, sizeof(SMergeIntervalAggOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); - if (miaInfo == NULL || pOperator == NULL) { + if (pMergeIntervalInfo == NULL || pOperator == NULL) { goto _error; } - miaInfo->groupIntervals = tdListNew(sizeof(SGroupTimeWindow)); + int32_t num = 0; + SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &num); + SSDataBlock* pResBlock = createResDataBlock(pIntervalPhyNode->window.node.pOutputDataBlockDesc); + + SInterval interval = {.interval = pIntervalPhyNode->interval, + .sliding = pIntervalPhyNode->sliding, + .intervalUnit = pIntervalPhyNode->intervalUnit, + .slidingUnit = pIntervalPhyNode->slidingUnit, + .offset = pIntervalPhyNode->offset, + .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision}; - SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo; - iaInfo->win = pTaskInfo->window; - iaInfo->inputOrder = TSDB_ORDER_ASC; - iaInfo->interval = *pInterval; - iaInfo->execModel = pTaskInfo->execModel; - iaInfo->binfo.mergeResultBlock = mergeBlock; + pMergeIntervalInfo->groupIntervals = tdListNew(sizeof(SGroupTimeWindow)); - iaInfo->primaryTsIndex = primaryTsSlotId; + SIntervalAggOperatorInfo* pIntervalInfo = &pMergeIntervalInfo->intervalAggOperatorInfo; + pIntervalInfo->win = pTaskInfo->window; + pIntervalInfo->inputOrder = TSDB_ORDER_ASC; + pIntervalInfo->interval = interval; + pIntervalInfo->execModel = pTaskInfo->execModel; + pIntervalInfo->binfo.mergeResultBlock = pIntervalPhyNode->window.mergeDataBlock; + pIntervalInfo->primaryTsIndex = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId; SExprSupp* pExprSupp = &pOperator->exprSupp; size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; initResultSizeInfo(&pOperator->resultInfo, 4096); - int32_t code = initAggInfo(pExprSupp, &iaInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str); - initBasicInfo(&iaInfo->binfo, pResBlock); + int32_t code = initAggInfo(pExprSupp, &pIntervalInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } - initExecTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &iaInfo->win); + initBasicInfo(&pIntervalInfo->binfo, pResBlock); + initExecTimeWindowInfo(&pIntervalInfo->twAggSup.timeWindowData, &pIntervalInfo->win); - iaInfo->timeWindowInterpo = timeWindowinterpNeeded(pExprSupp->pCtx, numOfCols, iaInfo); - if (iaInfo->timeWindowInterpo) { - iaInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SResultRowPosition)); - if (iaInfo->binfo.resultRowInfo.openWindow == NULL) { + + pIntervalInfo->timeWindowInterpo = timeWindowinterpNeeded(pExprSupp->pCtx, num, pIntervalInfo); + if (pIntervalInfo->timeWindowInterpo) { + pIntervalInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SOpenWindowInfo)); + if (pIntervalInfo->binfo.resultRowInfo.openWindow == NULL) { goto _error; } } - initResultRowInfo(&iaInfo->binfo.resultRowInfo); + initResultRowInfo(&pIntervalInfo->binfo.resultRowInfo); - pOperator->name = "TimeMergeIntervalAggOperator"; + pOperator->name = "TimeMergeIntervalAggOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->exprSupp.pExprInfo = pExprInfo; - pOperator->pTaskInfo = pTaskInfo; - pOperator->exprSupp.numOfExprs = numOfCols; - pOperator->info = miaInfo; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->pTaskInfo = pTaskInfo; + pOperator->info = pMergeIntervalInfo; pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doMergeIntervalAgg, NULL, NULL, destroyMergeIntervalOperatorInfo, NULL, NULL, NULL); @@ -5483,7 +5512,7 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI return pOperator; _error: - destroyMergeIntervalOperatorInfo(miaInfo, numOfCols); + destroyMergeIntervalOperatorInfo(pMergeIntervalInfo); taosMemoryFreeClear(pOperator); pTaskInfo->code = code; return NULL; diff --git a/source/libs/executor/src/tlinearhash.c b/source/libs/executor/src/tlinearhash.c index ad97d79f7e7ad28da3cf51aab33010303e11f509..cffabcb6aca1f1f5ba457fb765828889bc3c03e6 100644 --- a/source/libs/executor/src/tlinearhash.c +++ b/source/libs/executor/src/tlinearhash.c @@ -26,7 +26,7 @@ typedef struct SLHashBucket { int32_t size; // the number of element in this entry } SLHashBucket; -typedef struct SLHashObj { +struct SLHashObj { SDiskbasedBuf *pBuf; _hash_fn_t hashFn; SLHashBucket **pBucket; // entry list @@ -35,7 +35,7 @@ typedef struct SLHashObj { int32_t bits; // the number of bits used in hash int32_t numOfBuckets; // the number of buckets int64_t size; // the number of total items -} SLHashObj; +}; /** * the data struct for each hash node @@ -97,9 +97,9 @@ static int32_t doAddToBucket(SLHashObj* pHashObj, SLHashBucket* pBucket, int32_t // allocate the overflow buffer page to hold this k/v. int32_t newPageId = -1; - SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, 0, &newPageId); + SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, &newPageId); if (pNewPage == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } taosArrayPush(pBucket->pPageIdList, &newPageId); @@ -138,7 +138,6 @@ static void doRemoveFromBucket(SFilePage* pPage, SLHashNode* pNode, SLHashBucket } setBufPageDirty(pPage, true); - pBucket->size -= 1; } @@ -228,7 +227,11 @@ static int32_t doAddNewBucket(SLHashObj* pHashObj) { } int32_t pageId = -1; - SFilePage* p = getNewBufPage(pHashObj->pBuf, 0, &pageId); + SFilePage* p = getNewBufPage(pHashObj->pBuf, &pageId); + if (p == NULL) { + return terrno; + } + p->num = sizeof(SFilePage); setBufPageDirty(p, true); @@ -252,7 +255,8 @@ SLHashObj* tHashInit(int32_t inMemPages, int32_t pageSize, _hash_fn_t fn, int32_ printf("tHash Init failed since %s", terrstr(terrno)); return NULL; } - int32_t code = createDiskbasedBuf(&pHashObj->pBuf, pageSize, inMemPages * pageSize, 0, tsTempDir); + + int32_t code = createDiskbasedBuf(&pHashObj->pBuf, pageSize, inMemPages * pageSize, "", tsTempDir); if (code != 0) { terrno = code; return NULL; @@ -389,7 +393,9 @@ char* tHashGet(SLHashObj* pHashObj, const void *key, size_t keyLen) { } SLHashBucket* pBucket = pHashObj->pBucket[bucketId]; - for (int32_t i = 0; i < taosArrayGetSize(pBucket->pPageIdList); ++i) { + int32_t num = taosArrayGetSize(pBucket->pPageIdList); + + for (int32_t i = 0; i < num; ++i) { int32_t pageId = *(int32_t*)taosArrayGet(pBucket->pPageIdList, i); SFilePage* p = getBufPage(pHashObj->pBuf, pageId); diff --git a/source/libs/executor/src/tsimplehash.c b/source/libs/executor/src/tsimplehash.c index 7989ad2b5a44e6ca35074cff15ec865492025328..84b615af7a93aef9fbf86190a2544474b7b2c87b 100644 --- a/source/libs/executor/src/tsimplehash.c +++ b/source/libs/executor/src/tsimplehash.c @@ -14,8 +14,8 @@ */ #include "tsimplehash.h" -#include "os.h" #include "taoserror.h" +#include "tlog.h" #define SHASH_DEFAULT_LOAD_FACTOR 0.75 #define HASH_MAX_CAPACITY (1024 * 1024 * 16) @@ -31,19 +31,12 @@ taosMemoryFreeClear(_n); \ } while (0); -typedef struct SHNode { - struct SHNode *next; - char data[]; -} SHNode; - struct SSHashObj { SHNode **hashList; size_t capacity; // number of slots int64_t size; // number of elements in hash table _hash_fn_t hashFp; // hash function _equal_fn_t equalFp; // equal function - int32_t keyLen; - int32_t dataLen; }; static FORCE_INLINE int32_t taosHashCapacity(int32_t length) { @@ -54,7 +47,7 @@ static FORCE_INLINE int32_t taosHashCapacity(int32_t length) { return i; } -SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn, size_t keyLen, size_t dataLen) { +SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn) { ASSERT(fn != NULL); if (capacity == 0) { @@ -74,9 +67,6 @@ SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn, size_t keyLen, size_t pHashObj->hashFp = fn; ASSERT((pHashObj->capacity & (pHashObj->capacity - 1)) == 0); - pHashObj->keyLen = keyLen; - pHashObj->dataLen = dataLen; - pHashObj->hashList = (SHNode **)taosMemoryCalloc(pHashObj->capacity, sizeof(void *)); if (!pHashObj->hashList) { taosMemoryFree(pHashObj); @@ -93,40 +83,41 @@ int32_t tSimpleHashGetSize(const SSHashObj *pHashObj) { return (int32_t)atomic_load_64((int64_t *)&pHashObj->size); } -static SHNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) { - SHNode *pNewNode = taosMemoryMalloc(sizeof(SHNode) + keyLen + dsize); +static SHNode *doCreateHashNode(const void *key, size_t keyLen, const void *data, size_t dataLen, uint32_t hashVal) { + SHNode *pNewNode = taosMemoryMalloc(sizeof(SHNode) + keyLen + dataLen); if (!pNewNode) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - + pNewNode->keyLen = keyLen; + pNewNode->dataLen = dataLen; pNewNode->next = NULL; - memcpy(GET_SHASH_NODE_DATA(pNewNode), pData, dsize); - memcpy(GET_SHASH_NODE_KEY(pNewNode, dsize), key, keyLen); + memcpy(GET_SHASH_NODE_DATA(pNewNode), data, dataLen); + memcpy(GET_SHASH_NODE_KEY(pNewNode, dataLen), key, keyLen); return pNewNode; } -static void taosHashTableResize(SSHashObj *pHashObj) { +static void tSimpleHashTableResize(SSHashObj *pHashObj) { if (!SHASH_NEED_RESIZE(pHashObj)) { return; } int32_t newCapacity = (int32_t)(pHashObj->capacity << 1u); if (newCapacity > HASH_MAX_CAPACITY) { - // uDebug("current capacity:%zu, maximum capacity:%d, no resize applied due to limitation is reached", - // pHashObj->capacity, HASH_MAX_CAPACITY); + uDebug("current capacity:%zu, maximum capacity:%" PRIu64 ", no resize applied due to limitation is reached", + pHashObj->capacity, HASH_MAX_CAPACITY); return; } int64_t st = taosGetTimestampUs(); void *pNewEntryList = taosMemoryRealloc(pHashObj->hashList, sizeof(void *) * newCapacity); if (!pNewEntryList) { - // qWarn("hash resize failed due to out of memory, capacity remain:%zu", pHashObj->capacity); + uWarn("hash resize failed due to out of memory, capacity remain:%zu", pHashObj->capacity); return; } size_t inc = newCapacity - pHashObj->capacity; - memset((char *)pNewEntryList + pHashObj->capacity * sizeof(void *), 0, inc); + memset((char *)pNewEntryList + pHashObj->capacity * sizeof(void *), 0, inc * sizeof(void *)); pHashObj->hashList = pNewEntryList; pHashObj->capacity = newCapacity; @@ -141,8 +132,8 @@ static void taosHashTableResize(SSHashObj *pHashObj) { SHNode *pPrev = NULL; while (pNode != NULL) { - void *key = GET_SHASH_NODE_KEY(pNode, pHashObj->dataLen); - uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pHashObj->keyLen); + void *key = GET_SHASH_NODE_KEY(pNode, pNode->dataLen); + uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pNode->keyLen); int32_t newIdx = HASH_INDEX(hashVal, pHashObj->capacity); pNext = pNode->next; @@ -170,23 +161,23 @@ static void taosHashTableResize(SSHashObj *pHashObj) { // ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0); } -int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, const void *data) { +int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, const void *data, size_t dataLen) { if (!pHashObj || !key) { return -1; } - uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pHashObj->keyLen); + uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen); // need the resize process, write lock applied if (SHASH_NEED_RESIZE(pHashObj)) { - taosHashTableResize(pHashObj); + tSimpleHashTableResize(pHashObj); } int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); SHNode *pNode = pHashObj->hashList[slot]; if (!pNode) { - SHNode *pNewNode = doCreateHashNode(key, pHashObj->keyLen, data, pHashObj->dataLen, hashVal); + SHNode *pNewNode = doCreateHashNode(key, keyLen, data, dataLen, hashVal); if (!pNewNode) { return -1; } @@ -197,14 +188,14 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, const void *data) { } while (pNode) { - if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pHashObj->dataLen), key, pHashObj->keyLen) == 0) { + if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pNode->dataLen), key, keyLen) == 0) { break; } pNode = pNode->next; } if (!pNode) { - SHNode *pNewNode = doCreateHashNode(key, pHashObj->keyLen, data, pHashObj->dataLen, hashVal); + SHNode *pNewNode = doCreateHashNode(key, keyLen, data, dataLen, hashVal); if (!pNewNode) { return -1; } @@ -212,16 +203,16 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, const void *data) { pHashObj->hashList[slot] = pNewNode; atomic_add_fetch_64(&pHashObj->size, 1); } else { // update data - memcpy(GET_SHASH_NODE_DATA(pNode), data, pHashObj->dataLen); + memcpy(GET_SHASH_NODE_DATA(pNode), data, dataLen); } return 0; } -static FORCE_INLINE SHNode *doSearchInEntryList(SSHashObj *pHashObj, const void *key, int32_t index) { +static FORCE_INLINE SHNode *doSearchInEntryList(SSHashObj *pHashObj, const void *key, size_t keyLen, int32_t index) { SHNode *pNode = pHashObj->hashList[index]; while (pNode) { - if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pHashObj->dataLen), key, pHashObj->keyLen) == 0) { + if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pNode->dataLen), key, keyLen) == 0) { break; } @@ -233,12 +224,12 @@ static FORCE_INLINE SHNode *doSearchInEntryList(SSHashObj *pHashObj, const void static FORCE_INLINE bool taosHashTableEmpty(const SSHashObj *pHashObj) { return tSimpleHashGetSize(pHashObj) == 0; } -void *tSimpleHashGet(SSHashObj *pHashObj, const void *key) { +void *tSimpleHashGet(SSHashObj *pHashObj, const void *key, size_t keyLen) { if (!pHashObj || taosHashTableEmpty(pHashObj) || !key) { return NULL; } - uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pHashObj->keyLen); + uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen); int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); SHNode *pNode = pHashObj->hashList[slot]; @@ -247,7 +238,7 @@ void *tSimpleHashGet(SSHashObj *pHashObj, const void *key) { } char *data = NULL; - pNode = doSearchInEntryList(pHashObj, key, slot); + pNode = doSearchInEntryList(pHashObj, key, keyLen, slot); if (pNode != NULL) { data = GET_SHASH_NODE_DATA(pNode); } @@ -255,19 +246,19 @@ void *tSimpleHashGet(SSHashObj *pHashObj, const void *key) { return data; } -int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key) { +int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key, size_t keyLen) { if (!pHashObj || !key) { return TSDB_CODE_FAILED; } - uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pHashObj->keyLen); + uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen); int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); SHNode *pNode = pHashObj->hashList[slot]; SHNode *pPrev = NULL; while (pNode) { - if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pHashObj->dataLen), key, pHashObj->keyLen) == 0) { + if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pNode->dataLen), key, keyLen) == 0) { if (!pPrev) { pHashObj->hashList[slot] = pNode->next; } else { @@ -284,6 +275,40 @@ int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key) { return TSDB_CODE_SUCCESS; } +int32_t tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t keyLen, void **pIter, int32_t *iter) { + if (!pHashObj || !key) { + return TSDB_CODE_FAILED; + } + + uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen); + + int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); + + SHNode *pNode = pHashObj->hashList[slot]; + SHNode *pPrev = NULL; + while (pNode) { + if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pNode->dataLen), key, keyLen) == 0) { + if (!pPrev) { + pHashObj->hashList[slot] = pNode->next; + } else { + pPrev->next = pNode->next; + } + + if (*pIter == (void *)GET_SHASH_NODE_DATA(pNode)) { + *pIter = pPrev ? GET_SHASH_NODE_DATA(pPrev) : NULL; + } + + FREE_HASH_NODE(pNode); + atomic_sub_fetch_64(&pHashObj->size, 1); + break; + } + pPrev = pNode; + pNode = pNode->next; + } + + return TSDB_CODE_SUCCESS; +} + void tSimpleHashClear(SSHashObj *pHashObj) { if (!pHashObj || taosHashTableEmpty(pHashObj)) { return; @@ -301,6 +326,7 @@ void tSimpleHashClear(SSHashObj *pHashObj) { FREE_HASH_NODE(pNode); pNode = pNext; } + pHashObj->hashList[i] = NULL; } atomic_store_64(&pHashObj->size, 0); } @@ -312,6 +338,7 @@ void tSimpleHashCleanup(SSHashObj *pHashObj) { tSimpleHashClear(pHashObj); taosMemoryFreeClear(pHashObj->hashList); + taosMemoryFree(pHashObj); } size_t tSimpleHashGetMemSize(const SSHashObj *pHashObj) { @@ -322,25 +349,6 @@ size_t tSimpleHashGetMemSize(const SSHashObj *pHashObj) { return (pHashObj->capacity * sizeof(void *)) + sizeof(SHNode) * tSimpleHashGetSize(pHashObj) + sizeof(SSHashObj); } -void *tSimpleHashGetKey(const SSHashObj *pHashObj, void *data, size_t *keyLen) { -#if 0 - int32_t offset = offsetof(SHNode, data); - SHNode *node = ((SHNode *)(char *)data - offset); - if (keyLen) { - *keyLen = pHashObj->keyLen; - } - - return POINTER_SHIFT(data, pHashObj->dataLen); - - return GET_SHASH_NODE_KEY(node, pHashObj->dataLen); -#endif - if (keyLen) { - *keyLen = pHashObj->keyLen; - } - - return POINTER_SHIFT(data, pHashObj->dataLen); -} - void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter) { if (!pHashObj) { return NULL; @@ -349,7 +357,7 @@ void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter) { SHNode *pNode = NULL; if (!data) { - for (int32_t i = 0; i < pHashObj->capacity; ++i) { + for (int32_t i = *iter; i < pHashObj->capacity; ++i) { pNode = pHashObj->hashList[i]; if (!pNode) { continue; diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 48af951773814d9979eb6d349670753ad4b036eb..168cd21c4478d9c1b50053fadf0e9dcdf518d4f4 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -97,7 +97,7 @@ SSortHandle* tsortCreateSortHandle(SArray* pSortInfo, int32_t type, int32_t page return pSortHandle; } -static int32_t sortComparClearup(SMsortComparParam* cmpParam) { +static int32_t sortComparCleanup(SMsortComparParam* cmpParam) { for(int32_t i = 0; i < cmpParam->numOfSources; ++i) { SSortSource* pSource = cmpParam->pSources[i]; // NOTICE: pSource may be SGenericSource *, if it is SORT_MULTISOURCE_MERGE blockDataDestroy(pSource->src.pBlock); @@ -134,15 +134,14 @@ int32_t tsortAddSource(SSortHandle* pSortHandle, void* pSource) { return TSDB_CODE_SUCCESS; } -static int32_t doAddNewExternalMemSource(SDiskbasedBuf *pBuf, SArray* pAllSources, SSDataBlock* pBlock, int32_t* sourceId) { +static int32_t doAddNewExternalMemSource(SDiskbasedBuf *pBuf, SArray* pAllSources, SSDataBlock* pBlock, int32_t* sourceId, SArray* pPageIdList) { SSortSource* pSource = taosMemoryCalloc(1, sizeof(SSortSource)); if (pSource == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; } - pSource->pageIdList = getDataBufPagesIdList(pBuf, (*sourceId)); pSource->src.pBlock = pBlock; - + pSource->pageIdList = pPageIdList; taosArrayPush(pAllSources, &pSource); (*sourceId) += 1; @@ -171,6 +170,7 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) { } } + SArray* pPageIdList = taosArrayInit(4, sizeof(int32_t)); while(start < pDataBlock->info.rows) { int32_t stop = 0; blockDataSplitRows(pDataBlock, pDataBlock->info.hasVarCol, start, &stop, pHandle->pageSize); @@ -180,12 +180,14 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) { } int32_t pageId = -1; - void* pPage = getNewBufPage(pHandle->pBuf, pHandle->sourceId, &pageId); + void* pPage = getNewBufPage(pHandle->pBuf, &pageId); if (pPage == NULL) { blockDataDestroy(p); return terrno; } + taosArrayPush(pPageIdList, &pageId); + int32_t size = blockDataGetSize(p) + sizeof(int32_t) + taosArrayGetSize(p->pDataBlock) * sizeof(int32_t); assert(size <= getBufPageSize(pHandle->pBuf)); @@ -201,7 +203,7 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) { blockDataCleanup(pDataBlock); SSDataBlock* pBlock = createOneDataBlock(pDataBlock, false); - return doAddNewExternalMemSource(pHandle->pBuf, pHandle->pOrderedSource, pBlock, &pHandle->sourceId); + return doAddNewExternalMemSource(pHandle->pBuf, pHandle->pOrderedSource, pBlock, &pHandle->sourceId, pPageIdList); } static void setCurrentSourceIsDone(SSortSource* pSource, SSortHandle* pHandle) { @@ -502,6 +504,7 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) { return code; } + SArray* pPageIdList = taosArrayInit(4, sizeof(int32_t)); while (1) { SSDataBlock* pDataBlock = getSortedBlockDataInner(pHandle, &pHandle->cmpParam, numOfRows); if (pDataBlock == NULL) { @@ -509,11 +512,13 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) { } int32_t pageId = -1; - void* pPage = getNewBufPage(pHandle->pBuf, pHandle->sourceId, &pageId); + void* pPage = getNewBufPage(pHandle->pBuf, &pageId); if (pPage == NULL) { return terrno; } + taosArrayPush(pPageIdList, &pageId); + int32_t size = blockDataGetSize(pDataBlock) + sizeof(int32_t) + taosArrayGetSize(pDataBlock->pDataBlock) * sizeof(int32_t); assert(size <= getBufPageSize(pHandle->pBuf)); @@ -525,12 +530,12 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) { blockDataCleanup(pDataBlock); } - sortComparClearup(&pHandle->cmpParam); + sortComparCleanup(&pHandle->cmpParam); tMergeTreeDestroy(pHandle->pMergeTree); pHandle->numOfCompletedSources = 0; SSDataBlock* pBlock = createOneDataBlock(pHandle->pDataBlock, false); - code = doAddNewExternalMemSource(pHandle->pBuf, pResList, pBlock, &pHandle->sourceId); + code = doAddNewExternalMemSource(pHandle->pBuf, pResList, pBlock, &pHandle->sourceId, pPageIdList); if (code != 0) { return code; } diff --git a/source/libs/executor/test/executorTests.cpp b/source/libs/executor/test/executorTests.cpp index bba4b254c5d56f2c72988897273d363a3fec3c0c..1c4216334945c0b682e313a975e558390fbd7049 100644 --- a/source/libs/executor/test/executorTests.cpp +++ b/source/libs/executor/test/executorTests.cpp @@ -26,7 +26,6 @@ #include "executor.h" #include "executorimpl.h" #include "function.h" -#include "stub.h" #include "taos.h" #include "tdatablock.h" #include "tdef.h" diff --git a/source/libs/executor/test/lhashTests.cpp b/source/libs/executor/test/lhashTests.cpp index 695552faa0f353cc631b87cf03f51003c7b66aed..c9b75395bce345802ff0e563762758601aca0a18 100644 --- a/source/libs/executor/test/lhashTests.cpp +++ b/source/libs/executor/test/lhashTests.cpp @@ -26,40 +26,47 @@ TEST(testCase, linear_hash_Tests) { taosSeedRand(taosGetTimestampSec()); + strcpy(tsTempDir, "/tmp/"); _hash_fn_t fn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT); -#if 0 - SLHashObj* pHashObj = tHashInit(256, 4096, fn, 320); - for(int32_t i = 0; i < 5000000; ++i) { + + int64_t st = taosGetTimestampUs(); + + SLHashObj* pHashObj = tHashInit(4098*4*2, 512, fn, 40); + for(int32_t i = 0; i < 1000000; ++i) { int32_t code = tHashPut(pHashObj, &i, sizeof(i), &i, sizeof(i)); assert(code == 0); } // tHashPrint(pHashObj, LINEAR_HASH_STATIS); + int64_t et = taosGetTimestampUs(); -// for(int32_t i = 0; i < 10000; ++i) { -// char* v = tHashGet(pHashObj, &i, sizeof(i)); -// if (v != NULL) { -//// printf("find value: %d, key:%d\n", *(int32_t*) v, i); -// } else { + for(int32_t i = 0; i < 1000000; ++i) { + if (i == 950000) { + printf("kf\n"); + } + char* v = tHashGet(pHashObj, &i, sizeof(i)); + if (v != NULL) { +// printf("find value: %d, key:%d\n", *(int32_t*) v, i); + } else { // printf("failed to found key:%d in hash\n", i); -// } -// } + } + } - tHashPrint(pHashObj, LINEAR_HASH_STATIS); +// tHashPrint(pHashObj, LINEAR_HASH_STATIS); tHashCleanup(pHashObj); -#endif + int64_t et1 = taosGetTimestampUs(); -#if 0 - SHashObj* pHashObj = taosHashInit(1000, fn, false, HASH_NO_LOCK); + SHashObj* pHashObj1 = taosHashInit(1000, fn, false, HASH_NO_LOCK); for(int32_t i = 0; i < 1000000; ++i) { - taosHashPut(pHashObj, &i, sizeof(i), &i, sizeof(i)); + taosHashPut(pHashObj1, &i, sizeof(i), &i, sizeof(i)); } - for(int32_t i = 0; i < 10000; ++i) { - void* v = taosHashGet(pHashObj, &i, sizeof(i)); + for(int32_t i = 0; i < 1000000; ++i) { + void* v = taosHashGet(pHashObj1, &i, sizeof(i)); } - taosHashCleanup(pHashObj); -#endif + taosHashCleanup(pHashObj1); + int64_t et2 = taosGetTimestampUs(); + printf("linear hash time:%.2f ms, buildHash:%.2f ms, hash:%.2f\n", (et1-st)/1000.0, (et-st)/1000.0, (et2-et1)/1000.0); } \ No newline at end of file diff --git a/source/libs/executor/test/sortTests.cpp b/source/libs/executor/test/sortTests.cpp index 6e244152f20e0d4b914b21fcb871a5bbec871fce..4ac15670ac5dca547572df102f7267de08c0306d 100644 --- a/source/libs/executor/test/sortTests.cpp +++ b/source/libs/executor/test/sortTests.cpp @@ -27,7 +27,6 @@ #include "executorimpl.h" #include "executor.h" -#include "stub.h" #include "taos.h" #include "tdatablock.h" #include "tdef.h" @@ -196,7 +195,7 @@ int32_t docomp(const void* p1, const void* p2, void* param) { } } // namespace -#if 1 +#if 0 TEST(testCase, inMem_sort_Test) { SBlockOrderInfo oi = {0}; oi.order = TSDB_ORDER_ASC; @@ -382,7 +381,7 @@ TEST(testCase, ordered_merge_sort_Test) { } void* v = tsortGetValue(pTupleHandle, 0); - printf("%d: %d\n", row, *(int32_t*) v); +// printf("%d: %d\n", row, *(int32_t*) v); ASSERT_EQ(row++, *(int32_t*) v); } diff --git a/source/libs/executor/test/tSimpleHashTests.cpp b/source/libs/executor/test/tSimpleHashTests.cpp index a17a7146eabd55914b0143de55ddf0a732cac162..3bf339ef9040879c0978f9bedffb2b23bd8ec806 100644 --- a/source/libs/executor/test/tSimpleHashTests.cpp +++ b/source/libs/executor/test/tSimpleHashTests.cpp @@ -30,46 +30,115 @@ // return RUN_ALL_TESTS(); // } -TEST(testCase, tSimpleHashTest) { +TEST(testCase, tSimpleHashTest_intKey) { SSHashObj *pHashObj = - tSimpleHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), sizeof(int64_t), sizeof(int64_t)); + tSimpleHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT)); assert(pHashObj != nullptr); ASSERT_EQ(0, tSimpleHashGetSize(pHashObj)); + size_t keyLen = sizeof(int64_t); + size_t dataLen = sizeof(int64_t); + int64_t originKeySum = 0; for (int64_t i = 1; i <= 100; ++i) { originKeySum += i; - tSimpleHashPut(pHashObj, (const void *)&i, (const void *)&i); + tSimpleHashPut(pHashObj, (const void *)&i, keyLen, (const void *)&i, dataLen); ASSERT_EQ(i, tSimpleHashGetSize(pHashObj)); } for (int64_t i = 1; i <= 100; ++i) { - void *data = tSimpleHashGet(pHashObj, (const void *)&i); + void *data = tSimpleHashGet(pHashObj, (const void *)&i, keyLen); ASSERT_EQ(i, *(int64_t *)data); } - void *data = NULL; int32_t iter = 0; int64_t keySum = 0; int64_t dataSum = 0; + size_t kLen = 0; while ((data = tSimpleHashIterate(pHashObj, data, &iter))) { - void *key = tSimpleHashGetKey(pHashObj, data, NULL); + void *key = tSimpleHashGetKey(data, &kLen); + ASSERT_EQ(keyLen, kLen); keySum += *(int64_t *)key; dataSum += *(int64_t *)data; } - + ASSERT_EQ(keySum, dataSum); ASSERT_EQ(keySum, originKeySum); for (int64_t i = 1; i <= 100; ++i) { - tSimpleHashRemove(pHashObj, (const void *)&i); + tSimpleHashRemove(pHashObj, (const void *)&i, keyLen); ASSERT_EQ(100 - i, tSimpleHashGetSize(pHashObj)); } tSimpleHashCleanup(pHashObj); } + +TEST(testCase, tSimpleHashTest_binaryKey) { + SSHashObj *pHashObj = + tSimpleHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT)); + + assert(pHashObj != nullptr); + + ASSERT_EQ(0, tSimpleHashGetSize(pHashObj)); + + typedef struct { + int64_t suid; + int64_t uid; + } SCombineKey; + + size_t keyLen = sizeof(SCombineKey); + size_t dataLen = sizeof(int64_t); + + int64_t originDataSum = 0; + SCombineKey combineKey = {0}; + for (int64_t i = 1; i <= 100; ++i) { + combineKey.suid = i; + combineKey.uid = i + 1; + tSimpleHashPut(pHashObj, (const void *)&combineKey, keyLen, (const void *)&i, dataLen); + originDataSum += i; + ASSERT_EQ(i, tSimpleHashGetSize(pHashObj)); + } + + for (int64_t i = 1; i <= 100; ++i) { + combineKey.suid = i; + combineKey.uid = i + 1; + void *data = tSimpleHashGet(pHashObj, (const void *)&combineKey, keyLen); + ASSERT_EQ(i, *(int64_t *)data); + } + + void *data = NULL; + int32_t iter = 0; + int64_t keySum = 0; + int64_t dataSum = 0; + size_t kLen = 0; + while ((data = tSimpleHashIterate(pHashObj, data, &iter))) { + void *key = tSimpleHashGetKey(data, &kLen); + ASSERT_EQ(keyLen, kLen); + dataSum += *(int64_t *)data; + } + + ASSERT_EQ(originDataSum, dataSum); + + tSimpleHashRemove(pHashObj, (const void *)&combineKey, keyLen); + + while ((data = tSimpleHashIterate(pHashObj, data, &iter))) { + void *key = tSimpleHashGetKey(data, &kLen); + ASSERT_EQ(keyLen, kLen); + } + + for (int64_t i = 1; i <= 99; ++i) { + combineKey.suid = i; + combineKey.uid = i + 1; + tSimpleHashRemove(pHashObj, (const void *)&combineKey, keyLen); + ASSERT_EQ(99 - i, tSimpleHashGetSize(pHashObj)); + } + + tSimpleHashCleanup(pHashObj); +} + + #pragma GCC diagnostic pop \ No newline at end of file diff --git a/source/libs/function/inc/tpercentile.h b/source/libs/function/inc/tpercentile.h index dfb52f76946c502b38231130858b5694b7171f35..554f9e567f35cc0272a2a9755153de1b54d34392 100644 --- a/source/libs/function/inc/tpercentile.h +++ b/source/libs/function/inc/tpercentile.h @@ -51,20 +51,20 @@ struct tMemBucket; typedef int32_t (*__perc_hash_func_t)(struct tMemBucket *pBucket, const void *value); typedef struct tMemBucket { - int16_t numOfSlots; - int16_t type; - int16_t bytes; - int32_t total; - int32_t elemPerPage; // number of elements for each object - int32_t maxCapacity; // maximum allowed number of elements that can be sort directly to get the result - int32_t bufPageSize; // disk page size - MinMaxEntry range; // value range - int32_t times; // count that has been checked for deciding the correct data value buckets. - __compar_fn_t comparFn; - - tMemBucketSlot * pSlots; - SDiskbasedBuf *pBuffer; - __perc_hash_func_t hashFunc; + int16_t numOfSlots; + int16_t type; + int16_t bytes; + int32_t total; + int32_t elemPerPage; // number of elements for each object + int32_t maxCapacity; // maximum allowed number of elements that can be sort directly to get the result + int32_t bufPageSize; // disk page size + MinMaxEntry range; // value range + int32_t times; // count that has been checked for deciding the correct data value buckets. + __compar_fn_t comparFn; + tMemBucketSlot* pSlots; + SDiskbasedBuf* pBuffer; + __perc_hash_func_t hashFunc; + SHashObj* groupPagesMap; // disk page map for different groups; } tMemBucket; tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, double maxval); diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index b234ff97c9acdad3847081741a08144aa8df6c55..b7cd02befd326eafd7b0a9354a3f5c50147d834b 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -192,6 +192,24 @@ static bool validateTimezoneFormat(const SValueNode* pVal) { return true; } +static int32_t countTrailingSpaces(const SValueNode* pVal, bool isLtrim) { + int32_t numOfSpaces = 0; + int32_t len = varDataLen(pVal->datum.p); + char* str = varDataVal(pVal->datum.p); + + int32_t startPos = isLtrim ? 0 : len - 1; + int32_t step = isLtrim ? 1 : -1; + for (int32_t i = startPos; i < len || i >= 0; i += step) { + if (!isspace(str[i])) { + break; + } + numOfSpaces++; + } + + return numOfSpaces; + +} + void static addTimezoneParam(SNodeList* pList) { char buf[6] = {0}; time_t t = taosTime(NULL); @@ -285,7 +303,7 @@ static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t le } SExprNode* pPara1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); - if (!IS_VAR_DATA_TYPE(pPara1->resType.type)) { + if (!IS_STR_DATA_TYPE(pPara1->resType.type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -293,6 +311,40 @@ static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t le return TSDB_CODE_SUCCESS; } +static int32_t translateTrimStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isLtrim) { + if (1 != LIST_LENGTH(pFunc->pParameterList)) { + return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); + } + + SExprNode* pPara1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); + if (!IS_STR_DATA_TYPE(pPara1->resType.type)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + int32_t numOfSpaces = 0; + SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 0); + // for select trim functions with constant value from table, + // need to set the proper result result schema bytes to avoid + // trailing garbage characters + if (nodeType(pParamNode1) == QUERY_NODE_VALUE) { + SValueNode* pValue = (SValueNode*)pParamNode1; + numOfSpaces = countTrailingSpaces(pValue, isLtrim); + } + + + int32_t resBytes = pPara1->resType.bytes - numOfSpaces; + pFunc->node.resType = (SDataType){.bytes = resBytes, .type = pPara1->resType.type}; + return TSDB_CODE_SUCCESS; +} + +static int32_t translateLtrim(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + return translateTrimStr(pFunc, pErrBuf, len, true); +} + +static int32_t translateRtrim(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + return translateTrimStr(pFunc, pErrBuf, len, false); +} + static int32_t translateLogarithm(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); if (1 != numOfParams && 2 != numOfParams) { @@ -494,7 +546,7 @@ static int32_t translateApercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t // param2 if (3 == numOfParams) { uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type; - if (!IS_VAR_DATA_TYPE(para3Type)) { + if (!IS_STR_DATA_TYPE(para3Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -541,7 +593,7 @@ static int32_t translateApercentileImpl(SFunctionNode* pFunc, char* pErrBuf, int // param2 if (3 == numOfParams) { uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type; - if (!IS_VAR_DATA_TYPE(para3Type)) { + if (!IS_STR_DATA_TYPE(para3Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1336,7 +1388,7 @@ static int32_t translateSample(SFunctionNode* pFunc, char* pErrBuf, int32_t len) } // set result type - if (IS_VAR_DATA_TYPE(colType)) { + if (IS_STR_DATA_TYPE(colType)) { pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType}; } else { pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType}; @@ -1379,7 +1431,7 @@ static int32_t translateTail(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { } // set result type - if (IS_VAR_DATA_TYPE(colType)) { + if (IS_STR_DATA_TYPE(colType)) { pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType}; } else { pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType}; @@ -1451,12 +1503,18 @@ static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len) return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } + uint8_t nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, 0)); + uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + if (!IS_NUMERIC_TYPE(paraType) || QUERY_NODE_VALUE == nodeType) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + if (3 <= numOfParams) { int64_t timeVal[2] = {0}; for (int32_t i = 1; i < 3; ++i) { - uint8_t nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, i)); - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type; - if (!IS_VAR_DATA_TYPE(paraType) || QUERY_NODE_VALUE != nodeType) { + nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, i)); + paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type; + if (!IS_STR_DATA_TYPE(paraType) || QUERY_NODE_VALUE != nodeType) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1473,8 +1531,8 @@ static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len) } if (4 == numOfParams) { - uint8_t nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, 3)); - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type; + nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, 3)); + paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type; if (!IS_INTEGER_TYPE(paraType) || QUERY_NODE_VALUE != nodeType) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1624,7 +1682,7 @@ static int32_t translateLength(SFunctionNode* pFunc, char* pErrBuf, int32_t len) return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - if (!IS_VAR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) { + if (!IS_STR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1656,7 +1714,7 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t for (int32_t i = 0; i < numOfParams; ++i) { SNode* pPara = nodesListGetNode(pFunc->pParameterList, i); uint8_t paraType = ((SExprNode*)pPara)->resType.type; - if (!IS_VAR_DATA_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { + if (!IS_STR_DATA_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } if (TSDB_DATA_TYPE_NCHAR == paraType) { @@ -1712,7 +1770,7 @@ static int32_t translateSubstr(SFunctionNode* pFunc, char* pErrBuf, int32_t len) uint8_t para0Type = pPara0->resType.type; uint8_t para1Type = pPara1->resType.type; - if (!IS_VAR_DATA_TYPE(para0Type) || !IS_INTEGER_TYPE(para1Type)) { + if (!IS_STR_DATA_TYPE(para0Type) || !IS_INTEGER_TYPE(para1Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1744,7 +1802,7 @@ static int32_t translateCast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { uint8_t para2Type = pFunc->node.resType.type; int32_t para2Bytes = pFunc->node.resType.bytes; - if (IS_VAR_DATA_TYPE(para2Type)) { + if (IS_STR_DATA_TYPE(para2Type)) { para2Bytes -= VARSTR_HEADER_SIZE; } if (para2Bytes <= 0 || para2Bytes > 4096) { // cast dst var type length limits to 4096 bytes @@ -1801,7 +1859,7 @@ static int32_t translateToUnixtimestamp(SFunctionNode* pFunc, char* pErrBuf, int return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - if (!IS_VAR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) { + if (!IS_STR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1820,7 +1878,7 @@ static int32_t translateTimeTruncate(SFunctionNode* pFunc, char* pErrBuf, int32_ uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; - if ((!IS_VAR_DATA_TYPE(para1Type) && !IS_INTEGER_TYPE(para1Type) && TSDB_DATA_TYPE_TIMESTAMP != para1Type) || + if ((!IS_STR_DATA_TYPE(para1Type) && !IS_INTEGER_TYPE(para1Type) && TSDB_DATA_TYPE_TIMESTAMP != para1Type) || !IS_INTEGER_TYPE(para2Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -1853,7 +1911,7 @@ static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t le for (int32_t i = 0; i < 2; ++i) { uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type; - if (!IS_VAR_DATA_TYPE(paraType) && !IS_INTEGER_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) { + if (!IS_STR_DATA_TYPE(paraType) && !IS_INTEGER_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } } @@ -2827,7 +2885,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "ltrim", .type = FUNCTION_TYPE_LTRIM, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, - .translateFunc = translateInOutStr, + .translateFunc = translateLtrim, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = ltrimFunction, @@ -2837,7 +2895,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "rtrim", .type = FUNCTION_TYPE_RTRIM, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, - .translateFunc = translateInOutStr, + .translateFunc = translateRtrim, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = rtrimFunction, diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index de72c32fa14a5b999ac80cfe24da96843bb40b43..b71d06231e78c4edd904dd72d546284b51dd89ac 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -468,7 +468,7 @@ int32_t functionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); - pResInfo->isNullRes = (pResInfo->isNullRes == 1) ? 1 : (pResInfo->numOfRes == 0); + pResInfo->isNullRes = (pResInfo->numOfRes == 0) ? 1 : 0; char* in = GET_ROWCELL_INTERBUF(pResInfo); colDataAppend(pCol, pBlock->info.rows, in, pResInfo->isNullRes); @@ -498,7 +498,7 @@ int32_t functionFinalizeWithResultBuf(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); - pResInfo->isNullRes = (pResInfo->isNullRes == 1) ? 1 : (pResInfo->numOfRes == 0);; + pResInfo->isNullRes = (pResInfo->numOfRes == 0) ? 1 : 0; char* in = finalResult; colDataAppend(pCol, pBlock->info.rows, in, pResInfo->isNullRes); @@ -663,8 +663,7 @@ int32_t sumFunction(SqlFunctionCtx* pCtx) { // check for overflow if (IS_FLOAT_TYPE(type) && (isinf(pSumRes->dsum) || isnan(pSumRes->dsum))) { - GET_RES_INFO(pCtx)->isNullRes = 1; - numOfElem = 1; + numOfElem = 0; } _sum_over: @@ -791,8 +790,7 @@ int32_t avgFunction(SqlFunctionCtx* pCtx) { int32_t numOfRows = pInput->numOfRows; if (IS_NULL_TYPE(type)) { - GET_RES_INFO(pCtx)->isNullRes = 1; - numOfElem = 1; + numOfElem = 0; goto _avg_over; } @@ -1148,8 +1146,9 @@ bool getMinmaxFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) { return true; } -static void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos); -static void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos); +static STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock); +static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos); +static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos); static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, const char* tval) { // the data is loaded, not only the block SMA value @@ -1201,7 +1200,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { pBuf->v = *(int64_t*)tval; if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); } } else { if (IS_SIGNED_NUMERIC_TYPE(type)) { @@ -1210,10 +1209,10 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { int64_t val = GET_INT64_VAL(tval); if ((prev < val) ^ isMinFunc) { - pBuf->v = val; + *(int64_t*)&pBuf->v = val; if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); } } @@ -1223,36 +1222,36 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { uint64_t val = GET_UINT64_VAL(tval); if ((prev < val) ^ isMinFunc) { - pBuf->v = val; + *(uint64_t*)&pBuf->v = val; if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); } } } else if (type == TSDB_DATA_TYPE_DOUBLE) { double prev = 0; - GET_TYPED_DATA(prev, int64_t, type, &pBuf->v); + GET_TYPED_DATA(prev, double, type, &pBuf->v); double val = GET_DOUBLE_VAL(tval); if ((prev < val) ^ isMinFunc) { - pBuf->v = val; + *(double*)&pBuf->v = val; if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); } } } else if (type == TSDB_DATA_TYPE_FLOAT) { double prev = 0; - GET_TYPED_DATA(prev, int64_t, type, &pBuf->v); + GET_TYPED_DATA(prev, double, type, &pBuf->v); double val = GET_DOUBLE_VAL(tval); if ((prev < val) ^ isMinFunc) { - pBuf->v = val; + *(double*)&pBuf->v = val; } if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); } } } @@ -1277,7 +1276,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1289,7 +1288,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1308,7 +1307,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1320,7 +1319,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1339,7 +1338,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1351,7 +1350,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1370,7 +1369,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1382,7 +1381,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1403,7 +1402,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1415,7 +1414,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1434,7 +1433,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1446,7 +1445,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1465,7 +1464,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1477,7 +1476,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1496,7 +1495,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1508,7 +1507,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1528,7 +1527,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1540,7 +1539,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1559,7 +1558,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (!pBuf->assign) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); } pBuf->assign = true; } else { @@ -1571,7 +1570,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if ((*val < pData[i]) ^ isMinFunc) { *val = pData[i]; if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); } } } @@ -1582,7 +1581,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { _min_max_over: if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved ) { - doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos); + pBuf->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); pBuf->nullTupleSaved = true; } return numOfElems; @@ -1601,8 +1600,7 @@ int32_t maxFunction(SqlFunctionCtx* pCtx) { } static void setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t rowIndex); - -static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rIndex); +static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rowIndex); int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx); @@ -1613,7 +1611,7 @@ int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { int32_t currentRow = pBlock->info.rows; SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); - pEntryInfo->isNullRes = (pEntryInfo->isNullRes == 1) ? 1 : (pEntryInfo->numOfRes == 0); + pEntryInfo->isNullRes = (pEntryInfo->numOfRes == 0) ? 1 : 0; if (pCol->info.type == TSDB_DATA_TYPE_FLOAT) { float v = *(double*)&pRes->v; @@ -1650,34 +1648,29 @@ void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuple return; } - int32_t pageId = pTuplePos->pageId; - int32_t offset = pTuplePos->offset; + if (pCtx->saveHandle.pBuf != NULL) { + if (pTuplePos->pageId != -1) { + int32_t numOfCols = pCtx->subsidiaries.num; + const char* p = loadTupleData(pCtx, pTuplePos); - if (pTuplePos->pageId != -1) { - int32_t numOfCols = pCtx->subsidiaries.num; - SFilePage* pPage = getBufPage(pCtx->pBuf, pageId); + bool* nullList = (bool*)p; + char* pStart = (char*)(nullList + numOfCols * sizeof(bool)); - bool* nullList = (bool*)((char*)pPage + offset); - char* pStart = (char*)(nullList + numOfCols * sizeof(bool)); + // todo set the offset value to optimize the performance. + for (int32_t j = 0; j < numOfCols; ++j) { + SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j]; + int32_t dstSlotId = pc->pExpr->base.resSchema.slotId; - // todo set the offset value to optimize the performance. - for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) { - SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j]; - - SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0]; - int32_t dstSlotId = pc->pExpr->base.resSchema.slotId; - - SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId); - ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes); - if (nullList[j]) { - colDataAppendNULL(pDstCol, rowIndex); - } else { - colDataAppend(pDstCol, rowIndex, pStart, false); + SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId); + ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes); + if (nullList[j]) { + colDataAppendNULL(pDstCol, rowIndex); + } else { + colDataAppend(pDstCol, rowIndex, pStart, false); + } + pStart += pDstCol->info.bytes; } - pStart += pDstCol->info.bytes; } - - releaseBufPage(pCtx->pBuf, pPage); } } @@ -1792,8 +1785,7 @@ int32_t stddevFunction(SqlFunctionCtx* pCtx) { int32_t numOfRows = pInput->numOfRows; if (IS_NULL_TYPE(type)) { - GET_RES_INFO(pCtx)->isNullRes = 1; - numOfElem = 1; + numOfElem = 0; goto _stddev_over; } @@ -2759,15 +2751,15 @@ static FORCE_INLINE TSKEY getRowPTs(SColumnInfoData* pTsColInfo, int32_t rowInde return *(TSKEY*)colDataGetData(pTsColInfo, rowIndex); } -static void saveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, SFirstLastRes* pInfo) { +static void firstlastSaveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, SFirstLastRes* pInfo) { if (pCtx->subsidiaries.num <= 0) { return; } if (!pInfo->hasResult) { - doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos); + pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock); } else { - doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos); + updateTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos); } } @@ -2781,7 +2773,7 @@ static void doSaveCurrentVal(SqlFunctionCtx* pCtx, int32_t rowIndex, int64_t cur memcpy(pInfo->buf, pData, pInfo->bytes); pInfo->ts = currentTs; - saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo); + firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo); pInfo->hasResult = true; } @@ -2985,7 +2977,7 @@ static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, S pOutput->bytes = pInput->bytes; memcpy(pOutput->buf, pInput->buf, pOutput->bytes); - saveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput); + firstlastSaveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput); pOutput->hasResult = true; } @@ -3090,7 +3082,7 @@ static void doSaveLastrow(SqlFunctionCtx* pCtx, char* pData, int32_t rowIndex, i } pInfo->ts = cts; - saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo); + firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo); pInfo->hasResult = true; } @@ -3423,7 +3415,7 @@ int32_t topFunction(SqlFunctionCtx* pCtx) { } if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) { - doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos); + pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); pRes->nullTupleSaved = true; } return TSDB_CODE_SUCCESS; @@ -3451,7 +3443,7 @@ int32_t bottomFunction(SqlFunctionCtx* pCtx) { } if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) { - doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos); + pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); pRes->nullTupleSaved = true; } @@ -3503,7 +3495,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData // save the data of this tuple if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos); + pItem->tuplePos = saveTupleData(pCtx, rowIndex, pSrcBlock); } #ifdef BUF_PAGE_DEBUG qDebug("page_saveTuple i:%d, item:%p,pageId:%d, offset:%d\n", pEntryInfo->numOfRes, pItem, pItem->tuplePos.pageId, @@ -3527,7 +3519,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData // save the data of this tuple by over writing the old data if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos); + updateTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos); } #ifdef BUF_PAGE_DEBUG qDebug("page_copyTuple pageId:%d, offset:%d", pItem->tuplePos.pageId, pItem->tuplePos.offset); @@ -3544,38 +3536,13 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData * |(n columns, one bit for each column)| src column #1| src column #2| * +------------------------------------+--------------+--------------+ */ -void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) { - SFilePage* pPage = NULL; +void* serializeTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SSubsidiaryResInfo* pSubsidiaryies, char* buf) { + char* nullList = buf; + char* pStart = (char*)(nullList + sizeof(bool) * pSubsidiaryies->num); - // todo refactor: move away - int32_t completeRowSize = pCtx->subsidiaries.num * sizeof(bool); - for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) { - SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j]; - completeRowSize += pc->pExpr->base.resSchema.bytes; - } - - if (pCtx->curBufPage == -1) { - pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage); - pPage->num = sizeof(SFilePage); - } else { - pPage = getBufPage(pCtx->pBuf, pCtx->curBufPage); - if (pPage->num + completeRowSize > getBufPageSize(pCtx->pBuf)) { - // current page is all used, let's prepare a new buffer page - releaseBufPage(pCtx->pBuf, pPage); - pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage); - pPage->num = sizeof(SFilePage); - } - } - - pPos->pageId = pCtx->curBufPage; - pPos->offset = pPage->num; - - // keep the current row data, extract method int32_t offset = 0; - bool* nullList = (bool*)((char*)pPage + pPage->num); - char* pStart = (char*)(nullList + sizeof(bool) * pCtx->subsidiaries.num); - for (int32_t i = 0; i < pCtx->subsidiaries.num; ++i) { - SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i]; + for (int32_t i = 0; i < pSubsidiaryies->num; ++i) { + SqlFunctionCtx* pc = pSubsidiaryies->pCtx[i]; SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0]; int32_t srcSlotId = pFuncParam->pCol->slotId; @@ -3596,57 +3563,95 @@ void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* offset += pCol->info.bytes; } - pPage->num += completeRowSize; - - setBufPageDirty(pPage, true); - releaseBufPage(pCtx->pBuf, pPage); -#ifdef BUF_PAGE_DEBUG - qDebug("page_saveTuple pos:%p,pageId:%d, offset:%d\n", pPos, pPos->pageId, pPos->offset); -#endif + return buf; } -void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) { - SFilePage* pPage = getBufPage(pCtx->pBuf, pPos->pageId); +static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length) { + STuplePos p = {0}; + if (pHandle->pBuf != NULL) { + SFilePage* pPage = NULL; - int32_t numOfCols = pCtx->subsidiaries.num; + if (pHandle->currentPage == -1) { + pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage); + pPage->num = sizeof(SFilePage); + } else { + pPage = getBufPage(pHandle->pBuf, pHandle->currentPage); + if (pPage->num + length > getBufPageSize(pHandle->pBuf)) { + // current page is all used, let's prepare a new buffer page + releaseBufPage(pHandle->pBuf, pPage); + pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage); + pPage->num = sizeof(SFilePage); + } + } - bool* nullList = (bool*)((char*)pPage + pPos->offset); - char* pStart = (char*)(nullList + numOfCols * sizeof(bool)); + p = (STuplePos) {.pageId = pHandle->currentPage, .offset = pPage->num}; + memcpy(pPage->data + pPage->num, pBuf, length); - int32_t offset = 0; - for (int32_t i = 0; i < numOfCols; ++i) { - SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i]; - SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0]; - int32_t srcSlotId = pFuncParam->pCol->slotId; + pPage->num += length; + setBufPageDirty(pPage, true); + releaseBufPage(pHandle->pBuf, pPage); + } else { + // other tuple save policy + } - SColumnInfoData* pCol = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId); - if ((nullList[i] = colDataIsNull_s(pCol, rowIndex)) == true) { - offset += pCol->info.bytes; - continue; - } + return p; +} - char* p = colDataGetData(pCol, rowIndex); - if (IS_VAR_DATA_TYPE(pCol->info.type)) { - memcpy(pStart + offset, p, (pCol->info.type == TSDB_DATA_TYPE_JSON) ? getJsonValueLen(p) : varDataTLen(p)); - } else { - memcpy(pStart + offset, p, pCol->info.bytes); +STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock) { + if (pCtx->subsidiaries.rowLen == 0) { + int32_t rowLen = 0; + for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) { + SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j]; + rowLen += pc->pExpr->base.resSchema.bytes; } - offset += pCol->info.bytes; + pCtx->subsidiaries.rowLen = rowLen + pCtx->subsidiaries.num * sizeof(bool); + pCtx->subsidiaries.buf = taosMemoryMalloc(pCtx->subsidiaries.rowLen); } - setBufPageDirty(pPage, true); - releaseBufPage(pCtx->pBuf, pPage); -#ifdef BUF_PAGE_DEBUG - qDebug("page_copyTuple pos:%p, pageId:%d, offset:%d", pPos, pPos->pageId, pPos->offset); -#endif + char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf); + return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen); +} + +static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STuplePos* pPos) { + if (pHandle->pBuf != NULL) { + SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId); + memcpy(pPage->data + pPos->offset, pBuf, length); + setBufPageDirty(pPage, true); + releaseBufPage(pHandle->pBuf, pPage); + } else { + + } + + return TSDB_CODE_SUCCESS; +} + +static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) { + char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf); + doUpdateTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, pPos); + return TSDB_CODE_SUCCESS; +} + +static char* doLoadTupleData(SSerializeDataHandle* pHandle, const STuplePos* pPos) { + if (pHandle->pBuf != NULL) { + SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId); + char* p = pPage->data + pPos->offset; + releaseBufPage(pHandle->pBuf, pPage); + return p; + } else { + return NULL; + } +} + +static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos) { + return doLoadTupleData(&pCtx->saveHandle, pPos); } int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx); STopBotRes* pRes = getTopBotOutputInfo(pCtx); - int16_t type = pCtx->input.pData[0]->info.type; + int16_t type = pCtx->pExpr->base.resSchema.type; int32_t slotId = pCtx->pExpr->base.resSchema.slotId; SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); @@ -3791,8 +3796,6 @@ int32_t spreadFunction(SqlFunctionCtx* pCtx) { SColumnInfoData* pCol = pInput->pData[0]; int32_t start = pInput->startRowIndex; - int32_t numOfRows = pInput->numOfRows; - // check the valid data one by one for (int32_t i = start; i < pInput->numOfRows + start; ++i) { if (colDataIsNull_f(pCol->nullbitmap, i)) { @@ -3970,16 +3973,16 @@ int32_t elapsedFunction(SqlFunctionCtx* pCtx) { TSKEY* ptsList = (int64_t*)colDataGetData(pCol, 0); if (pCtx->order == TSDB_ORDER_DESC) { if (pCtx->start.key == INT64_MIN) { - pInfo->max = - (pInfo->max < ptsList[start + pInput->numOfRows - 1]) ? ptsList[start + pInput->numOfRows - 1] : pInfo->max; + pInfo->max = (pInfo->max < ptsList[start]) ? ptsList[start] : pInfo->max; } else { pInfo->max = pCtx->start.key + 1; } - if (pCtx->end.key != INT64_MIN) { - pInfo->min = pCtx->end.key; + if (pCtx->end.key == INT64_MIN) { + pInfo->min = (pInfo->min > ptsList[start + pInput->numOfRows - 1]) ? + ptsList[start + pInput->numOfRows - 1] : pInfo->min; } else { - pInfo->min = ptsList[start]; + pInfo->min = pCtx->end.key; } } else { if (pCtx->start.key == INT64_MIN) { @@ -3988,10 +3991,11 @@ int32_t elapsedFunction(SqlFunctionCtx* pCtx) { pInfo->min = pCtx->start.key; } - if (pCtx->end.key != INT64_MIN) { - pInfo->max = pCtx->end.key + 1; + if (pCtx->end.key == INT64_MIN) { + pInfo->max = (pInfo->max < ptsList[start + pInput->numOfRows - 1]) ? + ptsList[start + pInput->numOfRows - 1] : pInfo->max; } else { - pInfo->max = ptsList[start + pInput->numOfRows - 1]; + pInfo->max = pCtx->end.key + 1; } } } @@ -4918,6 +4922,16 @@ int32_t mavgFunction(SqlFunctionCtx* pCtx) { return numOfElems; } +static SSampleInfo* getSampleOutputInfo(SqlFunctionCtx* pCtx) { + SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); + SSampleInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo); + + pInfo->data = (char*)pInfo + sizeof(SSampleInfo); + pInfo->tuplePos = (STuplePos*)((char*)pInfo + sizeof(SSampleInfo) + pInfo->samples * pInfo->colBytes); + + return pInfo; +} + bool getSampleFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { SColumnNode* pCol = (SColumnNode*)nodesListGetNode(pFunc->pParameterList, 0); SValueNode* pVal = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1); @@ -4956,7 +4970,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da if (pInfo->numSampled < pInfo->samples) { sampleAssignResult(pInfo, data, pInfo->numSampled); if (pCtx->subsidiaries.num > 0) { - doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[pInfo->numSampled]); + pInfo->tuplePos[pInfo->numSampled] = saveTupleData(pCtx, index, pCtx->pSrcBlock); } pInfo->numSampled++; } else { @@ -4964,7 +4978,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da if (j < pInfo->samples) { sampleAssignResult(pInfo, data, j); if (pCtx->subsidiaries.num > 0) { - doCopyTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]); + updateTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]); } } } @@ -4972,7 +4986,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da int32_t sampleFunction(SqlFunctionCtx* pCtx) { SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); - SSampleInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo); + SSampleInfo* pInfo = getSampleOutputInfo(pCtx); SInputColumnInfoData* pInput = &pCtx->input; @@ -4987,7 +5001,7 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) { } if (pInfo->numSampled == 0 && pCtx->subsidiaries.num > 0 && !pInfo->nullTupleSaved) { - doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pInfo->nullTuplePos); + pInfo->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); pInfo->nullTupleSaved = true; } @@ -4998,7 +5012,7 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) { int32_t sampleFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx); - SSampleInfo* pInfo = GET_ROWCELL_INTERBUF(pEntryInfo); + SSampleInfo* pInfo = getSampleOutputInfo(pCtx); pEntryInfo->complete = true; int32_t slotId = pCtx->pExpr->base.resSchema.slotId; @@ -5563,6 +5577,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { if (pCtx->end.key != INT64_MIN) { pInfo->dOutput += twa_get_area(pInfo->p, pCtx->end); pInfo->p = pCtx->end; + numOfElems += 1; } pInfo->win.ekey = pInfo->p.key; diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c index 152a970c48eb5fb374f8806062d264e53b88f664..26735fa263cfed15ead940493b3c1eadf0e29c70 100644 --- a/source/libs/function/src/functionMgt.c +++ b/source/libs/function/src/functionMgt.c @@ -101,6 +101,14 @@ bool fmIsBuiltinFunc(const char* pFunc) { return NULL != taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc, strlen(pFunc)); } +EFunctionType fmGetFuncType(const char* pFunc) { + void* pVal = taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc, strlen(pFunc)); + if (NULL != pVal) { + return funcMgtBuiltins[*(int32_t*)pVal].type; + } + return FUNCTION_TYPE_UDF; +} + EFuncDataRequired fmFuncDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow) { if (fmIsUserDefinedFunc(pFunc->funcId) || pFunc->funcId < 0 || pFunc->funcId >= funcMgtBuiltinsNum) { return FUNC_DATA_REQUIRED_DATA_LOAD; diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index 517253dc01691754425bd93c40bfef2a2750eed5..4c58c0abe50e5784314445934618265231d4805a 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -33,13 +33,13 @@ static SFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx) SFilePage *buffer = (SFilePage *)taosMemoryCalloc(1, pMemBucket->bytes * pMemBucket->pSlots[slotIdx].info.size + sizeof(SFilePage)); int32_t groupId = getGroupId(pMemBucket->numOfSlots, slotIdx, pMemBucket->times); - SIDList list = getDataBufPagesIdList(pMemBucket->pBuffer, groupId); + SArray* pIdList = *(SArray**)taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId)); int32_t offset = 0; - for(int32_t i = 0; i < list->size; ++i) { - struct SPageInfo* pgInfo = *(struct SPageInfo**) taosArrayGet(list, i); + for(int32_t i = 0; i < taosArrayGetSize(pIdList); ++i) { + int32_t* pageId = taosArrayGet(pIdList, i); - SFilePage* pg = getBufPage(pMemBucket->pBuffer, getPageId(pgInfo)); + SFilePage* pg = getBufPage(pMemBucket->pBuffer, *pageId); memcpy(buffer->data + offset, pg->data, (size_t)(pg->num * pMemBucket->bytes)); offset += (int32_t)(pg->num * pMemBucket->bytes); @@ -97,11 +97,11 @@ double findOnlyResult(tMemBucket *pMemBucket) { } int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times); - SIDList list = getDataBufPagesIdList(pMemBucket->pBuffer, groupId); + SArray* list = *(SArray**)taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId)); assert(list->size == 1); - struct SPageInfo* pgInfo = (struct SPageInfo*) taosArrayGetP(list, 0); - SFilePage* pPage = getBufPage(pMemBucket->pBuffer, getPageId(pgInfo)); + int32_t* pageId = taosArrayGet(list, 0); + SFilePage* pPage = getBufPage(pMemBucket->pBuffer, *pageId); assert(pPage->num == 1); double v = 0; @@ -233,7 +233,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, pBucket->times = 1; pBucket->maxCapacity = 200000; - + pBucket->groupPagesMap = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); if (setBoundingBox(&pBucket->range, pBucket->type, minval, maxval) != 0) { // qError("MemBucket:%p, invalid value range: %f-%f", pBucket, minval, maxval); taosMemoryFree(pBucket); @@ -280,8 +280,16 @@ void tMemBucketDestroy(tMemBucket *pBucket) { return; } + void* p = taosHashIterate(pBucket->groupPagesMap, NULL); + while(p) { + SArray** p1 = p; + p = taosHashIterate(pBucket->groupPagesMap, p); + taosArrayDestroy(*p1); + } + destroyDiskbasedBuf(pBucket->pBuffer); taosMemoryFreeClear(pBucket->pSlots); + taosHashCleanup(pBucket->groupPagesMap); taosMemoryFreeClear(pBucket); } @@ -357,8 +365,16 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) { pSlot->info.data = NULL; } - pSlot->info.data = getNewBufPage(pBucket->pBuffer, groupId, &pageId); + SArray* pPageIdList = (SArray*)taosHashGet(pBucket->groupPagesMap, &groupId, sizeof(groupId)); + if (pPageIdList == NULL) { + SArray* pList = taosArrayInit(4, sizeof(int32_t)); + taosHashPut(pBucket->groupPagesMap, &groupId, sizeof(groupId), &pList, POINTER_BYTES); + pPageIdList = pList; + } + + pSlot->info.data = getNewBufPage(pBucket->pBuffer, &pageId); pSlot->info.pageId = pageId; + taosArrayPush(pPageIdList, &pageId); } memcpy(pSlot->info.data->data + pSlot->info.data->num * pBucket->bytes, d, pBucket->bytes); @@ -476,7 +492,7 @@ double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction) resetSlotInfo(pMemBucket); int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times - 1); - SIDList list = getDataBufPagesIdList(pMemBucket->pBuffer, groupId); + SIDList list = taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId)); assert(list->size > 0); for (int32_t f = 0; f < list->size; ++f) { diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index 1cbc78df48b1cbeb5d1645dcd945168f21d25ba6..5b27e030b92b03a3450cf2a864a406209c1a9dcd 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -84,6 +84,7 @@ typedef struct SUdf { TUdfAggStartFunc aggStartFunc; TUdfAggProcessFunc aggProcFunc; TUdfAggFinishFunc aggFinishFunc; + TUdfAggMergeFunc aggMergeFunc; TUdfInitFunc initFunc; TUdfDestroyFunc destroyFunc; @@ -271,6 +272,15 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) { break; } + case TSDB_UDF_CALL_AGG_MERGE: { + SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0}; + code = udf->aggMergeFunc(&call->interBuf, &call->interBuf2, &outBuf); + freeUdfInterBuf(&call->interBuf); + freeUdfInterBuf(&call->interBuf2); + subRsp->resultBuf = outBuf; + + break; + } case TSDB_UDF_CALL_AGG_FIN: { SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0}; code = udf->aggFinishFunc(&call->interBuf, &outBuf); @@ -309,6 +319,10 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) { freeUdfInterBuf(&subRsp->resultBuf); break; } + case TSDB_UDF_CALL_AGG_MERGE: { + freeUdfInterBuf(&subRsp->resultBuf); + break; + } case TSDB_UDF_CALL_AGG_FIN: { freeUdfInterBuf(&subRsp->resultBuf); break; @@ -560,7 +574,11 @@ int32_t udfdLoadUdf(char *udfName, SUdf *udf) { strncpy(finishFuncName, processFuncName, strlen(processFuncName)); strncat(finishFuncName, finishSuffix, strlen(finishSuffix)); uv_dlsym(&udf->lib, finishFuncName, (void **)(&udf->aggFinishFunc)); - // TODO: merge + char mergeFuncName[TSDB_FUNC_NAME_LEN + 6] = {0}; + char *mergeSuffix = "_merge"; + strncpy(finishFuncName, processFuncName, strlen(processFuncName)); + strncat(finishFuncName, mergeSuffix, strlen(mergeSuffix)); + uv_dlsym(&udf->lib, finishFuncName, (void **)(&udf->aggMergeFunc)); } return 0; } diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c index 21aeaba70bb02f6f44c2fc6d40d07515201ee25a..75844ce76f1cb50d6847709309dae1ed3f77bf70 100644 --- a/source/libs/index/src/indexFilter.c +++ b/source/libs/index/src/indexFilter.c @@ -255,6 +255,13 @@ static int32_t sifInitOperParams(SIFParam **params, SOperatorNode *node, SIFCtx if (node->opType == OP_TYPE_JSON_GET_VALUE) { return code; } + if ((node->pLeft != NULL && nodeType(node->pLeft) == QUERY_NODE_COLUMN) && + (node->pRight != NULL && nodeType(node->pRight) == QUERY_NODE_VALUE)) { + SColumnNode *cn = (SColumnNode *)(node->pLeft); + if (cn->node.resType.type == TSDB_DATA_TYPE_JSON) { + SIF_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + } SIFParam *paramList = taosMemoryCalloc(nParam, sizeof(SIFParam)); if (NULL == paramList) { diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 9390d129df4f536070320246555d690ab8b4972c..83bccbffb4b973fe3a4b720219ab0bb91d6f05b6 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -545,6 +545,7 @@ static int32_t physiSysTableScanCopy(const SSystemTableScanPhysiNode* pSrc, SSys COPY_OBJECT_FIELD(mgmtEpSet, sizeof(SEpSet)); COPY_SCALAR_FIELD(showRewrite); COPY_SCALAR_FIELD(accountId); + COPY_SCALAR_FIELD(sysInfo); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index a6546f3299e5bb5e4a99f5fe829492f42bc3b4b5..822bdec365e6f5128d7c36cf6b7d765eeb2488de 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1654,6 +1654,7 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) { static const char* jkSysTableScanPhysiPlanMnodeEpSet = "MnodeEpSet"; static const char* jkSysTableScanPhysiPlanShowRewrite = "ShowRewrite"; static const char* jkSysTableScanPhysiPlanAccountId = "AccountId"; +static const char* jkSysTableScanPhysiPlanSysInfo = "SysInfo"; static int32_t physiSysTableScanNodeToJson(const void* pObj, SJson* pJson) { const SSystemTableScanPhysiNode* pNode = (const SSystemTableScanPhysiNode*)pObj; @@ -1668,6 +1669,9 @@ static int32_t physiSysTableScanNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkSysTableScanPhysiPlanAccountId, pNode->accountId); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkSysTableScanPhysiPlanSysInfo, pNode->sysInfo); + } return code; } @@ -1684,7 +1688,9 @@ static int32_t jsonToPhysiSysTableScanNode(const SJson* pJson, void* pObj) { } if (TSDB_CODE_SUCCESS == code) { tjsonGetNumberValue(pJson, jkSysTableScanPhysiPlanAccountId, pNode->accountId, code); - ; + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkSysTableScanPhysiPlanSysInfo, &pNode->sysInfo); } return code; @@ -4673,7 +4679,6 @@ static int32_t jsonToNode(const SJson* pJson, void* pObj) { int32_t code; tjsonGetNumberValue(pJson, jkNodeType, pNode->type, code); - ; if (TSDB_CODE_SUCCESS == code) { code = tjsonToObject(pJson, nodesNodeName(pNode->type), jsonToSpecificNode, pNode); if (TSDB_CODE_SUCCESS != code) { diff --git a/source/libs/nodes/src/nodesToSQLFuncs.c b/source/libs/nodes/src/nodesToSQLFuncs.c index 3b129740e81b4743dbae25dd82da36ac2a49407e..9325d0288636ca7e22fe4fdd3a8e50ff90cdf0de 100644 --- a/source/libs/nodes/src/nodesToSQLFuncs.c +++ b/source/libs/nodes/src/nodesToSQLFuncs.c @@ -21,36 +21,89 @@ #include "taoserror.h" #include "thash.h" -char *gOperatorStr[] = {NULL, - "+", - "-", - "*", - "/", - "%", - "-", - "&", - "|", - ">", - ">=", - "<", - "<=", - "=", - "<>", - "IN", - "NOT IN", - "LIKE", - "NOT LIKE", - "MATCH", - "NMATCH", - "IS NULL", - "IS NOT NULL", - "IS TRUE", - "IS FALSE", - "IS UNKNOWN", - "IS NOT TRUE", - "IS NOT FALSE", - "IS NOT UNKNOWN"}; -char *gLogicConditionStr[] = {"AND", "OR", "NOT"}; +const char *operatorTypeStr(EOperatorType type) { + switch (type) { + case OP_TYPE_ADD: + return "+"; + case OP_TYPE_SUB: + return "-"; + case OP_TYPE_MULTI: + return "*"; + case OP_TYPE_DIV: + return "/"; + case OP_TYPE_REM: + return "%"; + case OP_TYPE_MINUS: + return "-"; + case OP_TYPE_BIT_AND: + return "&"; + case OP_TYPE_BIT_OR: + return "|"; + case OP_TYPE_GREATER_THAN: + return ">"; + case OP_TYPE_GREATER_EQUAL: + return ">="; + case OP_TYPE_LOWER_THAN: + return "<"; + case OP_TYPE_LOWER_EQUAL: + return "<="; + case OP_TYPE_EQUAL: + return "="; + case OP_TYPE_NOT_EQUAL: + return "<>"; + case OP_TYPE_IN: + return "IN"; + case OP_TYPE_NOT_IN: + return "NOT IN"; + case OP_TYPE_LIKE: + return "LIKE"; + case OP_TYPE_NOT_LIKE: + return "NOT LIKE"; + case OP_TYPE_MATCH: + return "MATCH"; + case OP_TYPE_NMATCH: + return "NMATCH"; + case OP_TYPE_IS_NULL: + return "IS NULL"; + case OP_TYPE_IS_NOT_NULL: + return "IS NOT NULL"; + case OP_TYPE_IS_TRUE: + return "IS TRUE"; + case OP_TYPE_IS_FALSE: + return "IS FALSE"; + case OP_TYPE_IS_UNKNOWN: + return "IS UNKNOWN"; + case OP_TYPE_IS_NOT_TRUE: + return "IS NOT TRUE"; + case OP_TYPE_IS_NOT_FALSE: + return "IS NOT FALSE"; + case OP_TYPE_IS_NOT_UNKNOWN: + return "IS NOT UNKNOWN"; + case OP_TYPE_JSON_GET_VALUE: + return "=>"; + case OP_TYPE_JSON_CONTAINS: + return "CONTAINS"; + case OP_TYPE_ASSIGN: + return "="; + default: + break; + } + return "UNKNOWN"; +} + +const char *logicConditionTypeStr(ELogicConditionType type) { + switch (type) { + case LOGIC_COND_TYPE_AND: + return "AND"; + case LOGIC_COND_TYPE_OR: + return "OR"; + case LOGIC_COND_TYPE_NOT: + return "NOT"; + default: + break; + } + return "UNKNOWN"; +} int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len) { switch (pNode->type) { @@ -82,7 +135,12 @@ int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len) { NODES_ERR_RET(TSDB_CODE_QRY_APP_ERROR); } - *len += snprintf(buf + *len, bufSize - *len, "%s", t); + int32_t tlen = strlen(t); + if (tlen > 32) { + *len += snprintf(buf + *len, bufSize - *len, "%.*s...%s", 32, t, t + tlen - 1); + } else { + *len += snprintf(buf + *len, bufSize - *len, "%s", t); + } taosMemoryFree(t); return TSDB_CODE_SUCCESS; @@ -94,12 +152,7 @@ int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len) { NODES_ERR_RET(nodesNodeToSQL(pOpNode->pLeft, buf, bufSize, len)); } - if (pOpNode->opType >= (sizeof(gOperatorStr) / sizeof(gOperatorStr[0]))) { - nodesError("unknown operation type:%d", pOpNode->opType); - NODES_ERR_RET(TSDB_CODE_QRY_APP_ERROR); - } - - *len += snprintf(buf + *len, bufSize - *len, " %s ", gOperatorStr[pOpNode->opType]); + *len += snprintf(buf + *len, bufSize - *len, " %s ", operatorTypeStr(pOpNode->opType)); if (pOpNode->pRight) { NODES_ERR_RET(nodesNodeToSQL(pOpNode->pRight, buf, bufSize, len)); @@ -118,7 +171,7 @@ int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len) { FOREACH(node, pLogicNode->pParameterList) { if (!first) { - *len += snprintf(buf + *len, bufSize - *len, " %s ", gLogicConditionStr[pLogicNode->condType]); + *len += snprintf(buf + *len, bufSize - *len, " %s ", logicConditionTypeStr(pLogicNode->condType)); } NODES_ERR_RET(nodesNodeToSQL(node, buf, bufSize, len)); first = false; @@ -151,12 +204,17 @@ int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len) { SNodeListNode *pListNode = (SNodeListNode *)pNode; SNode *node = NULL; bool first = true; + int32_t num = 0; *len += snprintf(buf + *len, bufSize - *len, "("); FOREACH(node, pListNode->pNodeList) { if (!first) { *len += snprintf(buf + *len, bufSize - *len, ", "); + if (++num >= 10) { + *len += snprintf(buf + *len, bufSize - *len, "..."); + break; + } } NODES_ERR_RET(nodesNodeToSQL(node, buf, bufSize, len)); first = false; diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index f8ba6e69019eb229164f77933dac11b27bd1c2b3..d13057a93e824c2b94d94a006664b4cbc4c2f870 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -817,6 +817,7 @@ void nodesDestroyNode(SNode* pNode) { destroyLogicNode((SLogicNode*)pLogicNode); nodesDestroyNode(pLogicNode->pWStartTs); nodesDestroyNode(pLogicNode->pValues); + nodesDestroyList(pLogicNode->pFillExprs); break; } case QUERY_NODE_LOGIC_PLAN_SORT: { diff --git a/source/libs/parser/inc/parUtil.h b/source/libs/parser/inc/parUtil.h index 2249bc7823a49589e99f1714d06401b419c3d72d..308afd467f1248c14dac9d8abea638cb42444936 100644 --- a/source/libs/parser/inc/parUtil.h +++ b/source/libs/parser/inc/parUtil.h @@ -22,6 +22,7 @@ extern "C" { #include "catalog.h" #include "os.h" +#include "parser.h" #include "query.h" #define parserFatal(param, ...) qFatal("PARSER: " param, ##__VA_ARGS__) @@ -44,18 +45,37 @@ typedef struct SParseTablesMetaReq { SHashObj* pTables; } SParseTablesMetaReq; +typedef enum ECatalogReqType { + CATALOG_REQ_TYPE_META = 1, + CATALOG_REQ_TYPE_VGROUP, + CATALOG_REQ_TYPE_BOTH +} ECatalogReqType; + +typedef struct SInsertTablesMetaReq { + char dbFName[TSDB_DB_FNAME_LEN]; + SArray* pTableMetaPos; + SArray* pTableMetaReq; // element is SName + SArray* pTableVgroupPos; + SArray* pTableVgroupReq; // element is SName +} SInsertTablesMetaReq; + typedef struct SParseMetaCache { - SHashObj* pTableMeta; // key is tbFName, element is STableMeta* - SHashObj* pDbVgroup; // key is dbFName, element is SArray* - SHashObj* pTableVgroup; // key is tbFName, element is SVgroupInfo* - SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo* - SHashObj* pDbInfo; // key is tbFName, element is SDbInfo* - SHashObj* pUserAuth; // key is SUserAuthInfo serialized string, element is bool indicating whether or not to pass - SHashObj* pUdf; // key is funcName, element is SFuncInfo* - SHashObj* pTableIndex; // key is tbFName, element is SArray* - SHashObj* pTableCfg; // key is tbFName, element is STableCfg* - SArray* pDnodes; // element is SEpSet - bool dnodeRequired; + SHashObj* pTableMeta; // key is tbFName, element is STableMeta* + SHashObj* pDbVgroup; // key is dbFName, element is SArray* + SHashObj* pTableVgroup; // key is tbFName, element is SVgroupInfo* + SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo* + SHashObj* pDbInfo; // key is tbFName, element is SDbInfo* + SHashObj* pUserAuth; // key is SUserAuthInfo serialized string, element is bool indicating whether or not to pass + SHashObj* pUdf; // key is funcName, element is SFuncInfo* + SHashObj* pTableIndex; // key is tbFName, element is SArray* + SHashObj* pTableCfg; // key is tbFName, element is STableCfg* + SArray* pDnodes; // element is SEpSet + bool dnodeRequired; + SHashObj* pInsertTables; // key is dbName, element is SInsertTablesMetaReq*, for insert + const char* pUser; + const SArray* pTableMetaData; // pRes = STableMeta* + const SArray* pTableVgroupData; // pRes = SVgroupInfo* + int32_t sqlTableNum; } SParseMetaCache; int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...); @@ -72,8 +92,9 @@ STableMeta* tableMetaDup(const STableMeta* pTableMeta); int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen); -int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq); -int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache); +int32_t buildCatalogReq(SParseContext* pCxt, const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq); +int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache, + bool insertValuesStmt); int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache); int32_t reserveTableMetaInCacheExt(const SName* pName, SParseMetaCache* pMetaCache); int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache); @@ -100,6 +121,12 @@ int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFun int32_t getTableIndexFromCache(SParseMetaCache* pMetaCache, const SName* pName, SArray** pIndexes); int32_t getTableCfgFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableCfg** pOutput); int32_t getDnodeListFromCache(SParseMetaCache* pMetaCache, SArray** pDnodes); +int32_t reserveTableMetaInCacheForInsert(const SName* pName, ECatalogReqType reqType, int32_t tableNo, + SParseMetaCache* pMetaCache); +int32_t getTableMetaFromCacheForInsert(SArray* pTableMetaPos, SParseMetaCache* pMetaCache, int32_t tableNo, + STableMeta** pMeta); +int32_t getTableVgroupFromCacheForInsert(SArray* pTableVgroupPos, SParseMetaCache* pMetaCache, int32_t tableNo, + SVgroupInfo* pVgroup); void destoryParseMetaCache(SParseMetaCache* pMetaCache, bool request); #ifdef __cplusplus diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index 56e68d8374518ab7494371151513c099bc37ab80..9bff061d02fbfa8d5795dff82c9ec93b7093f96d 100644 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -495,12 +495,9 @@ bufsize_opt(A) ::= BUFSIZE NK_INTEGER(B). /************************************************ create/drop stream **************************************************/ cmd ::= CREATE STREAM not_exists_opt(E) stream_name(A) - stream_options(B) into_opt(C) AS query_expression(D). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, D); } + stream_options(B) INTO full_table_name(C) AS query_expression(D). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, D); } cmd ::= DROP STREAM exists_opt(A) stream_name(B). { pCxt->pRootNode = createDropStreamStmt(pCxt, A, &B); } -into_opt(A) ::= . { A = NULL; } -into_opt(A) ::= INTO full_table_name(B). { A = B; } - stream_options(A) ::= . { A = createStreamOptions(pCxt); } stream_options(A) ::= stream_options(B) TRIGGER AT_ONCE. { ((SStreamOptions*)B)->triggerType = STREAM_TRIGGER_AT_ONCE; A = B; } stream_options(A) ::= stream_options(B) TRIGGER WINDOW_CLOSE. { ((SStreamOptions*)B)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; A = B; } diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index ffa7729745021be10cfc22aa66dab7f7b3abccb3..4dcd2bba5aa6f13ef26995162e6bf38fe0a43b27 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -97,16 +97,23 @@ typedef struct SCollectMetaKeyCxt { typedef struct SCollectMetaKeyFromExprCxt { SCollectMetaKeyCxt* pComCxt; + bool hasLastRow; int32_t errCode; } SCollectMetaKeyFromExprCxt; static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt); static EDealRes collectMetaKeyFromFunction(SCollectMetaKeyFromExprCxt* pCxt, SFunctionNode* pFunc) { - if (fmIsBuiltinFunc(pFunc->functionName)) { - return DEAL_RES_CONTINUE; + switch (fmGetFuncType(pFunc->functionName)) { + case FUNCTION_TYPE_LAST_ROW: + pCxt->hasLastRow = true; + break; + case FUNCTION_TYPE_UDF: + pCxt->errCode = reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache); + break; + default: + break; } - pCxt->errCode = reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache); return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR; } @@ -136,9 +143,6 @@ static int32_t collectMetaKeyFromRealTableImpl(SCollectMetaKeyCxt* pCxt, const c if (TSDB_CODE_SUCCESS == code && (0 == strcmp(pTable, TSDB_INS_TABLE_DNODE_VARIABLES))) { code = reserveDnodeRequiredInCache(pCxt->pMetaCache); } - if (TSDB_CODE_SUCCESS == code) { - code = reserveDbCfgInCache(pCxt->pParseCxt->acctId, pDb, pCxt->pMetaCache); - } return code; } @@ -185,9 +189,19 @@ static int32_t collectMetaKeyFromSetOperator(SCollectMetaKeyCxt* pCxt, SSetOpera return code; } +static int32_t reserveDbCfgForLastRow(SCollectMetaKeyCxt* pCxt, SNode* pTable) { + if (NULL == pTable || QUERY_NODE_REAL_TABLE != nodeType(pTable)) { + return TSDB_CODE_SUCCESS; + } + return reserveDbCfgInCache(pCxt->pParseCxt->acctId, ((SRealTableNode*)pTable)->table.dbName, pCxt->pMetaCache); +} + static int32_t collectMetaKeyFromSelect(SCollectMetaKeyCxt* pCxt, SSelectStmt* pStmt) { - SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS}; + SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .hasLastRow = false, .errCode = TSDB_CODE_SUCCESS}; nodesWalkSelectStmt(pStmt, SQL_CLAUSE_FROM, collectMetaKeyFromExprImpl, &cxt); + if (TSDB_CODE_SUCCESS == cxt.errCode && cxt.hasLastRow) { + cxt.errCode = reserveDbCfgForLastRow(pCxt, pStmt->pFromTable); + } return cxt.errCode; } @@ -365,7 +379,7 @@ static int32_t collectMetaKeyFromShowStables(SCollectMetaKeyCxt* pCxt, SShowStmt } static int32_t collectMetaKeyFromShowStreams(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { - return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS, + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STREAMS, pCxt->pMetaCache); } @@ -411,7 +425,7 @@ static int32_t collectMetaKeyFromShowVgroups(SCollectMetaKeyCxt* pCxt, SShowStmt } static int32_t collectMetaKeyFromShowTopics(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { - return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TOPICS, + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TOPICS, pCxt->pMetaCache); } @@ -464,6 +478,9 @@ static int32_t collectMetaKeyFromShowCreateTable(SCollectMetaKeyCxt* pCxt, SShow if (TSDB_CODE_SUCCESS == code) { code = reserveDbCfgInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache); } + if (TSDB_CODE_SUCCESS == code) { + code = reserveUserAuthInCacheExt(pCxt->pParseCxt->pUser, &name, AUTH_TYPE_READ, pCxt->pMetaCache); + } return code; } @@ -503,7 +520,7 @@ static int32_t collectMetaKeyFromShowBlockDist(SCollectMetaKeyCxt* pCxt, SShowTa } static int32_t collectMetaKeyFromShowSubscriptions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { - return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_SUBSCRIPTIONS, + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SUBSCRIPTIONS, pCxt->pMetaCache); } diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c index befc822808c7b50eeaea5753a61bb10ffef81523..d9a5761d99b7f04d5f2d4d9604ceee3faf76a896 100644 --- a/source/libs/parser/src/parAuthenticator.c +++ b/source/libs/parser/src/parAuthenticator.c @@ -96,6 +96,10 @@ static int32_t authInsert(SAuthCxt* pCxt, SInsertStmt* pInsert) { return code; } +static int32_t authShowCreateTable(SAuthCxt* pCxt, SShowCreateTableStmt* pStmt) { + return checkAuth(pCxt, pStmt->dbName, AUTH_TYPE_READ); +} + static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) { switch (nodeType(pStmt)) { case QUERY_NODE_SET_OPERATOR: @@ -108,6 +112,24 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) { return authDelete(pCxt, (SDeleteStmt*)pStmt); case QUERY_NODE_INSERT_STMT: return authInsert(pCxt, (SInsertStmt*)pStmt); + case QUERY_NODE_SHOW_DNODES_STMT: + case QUERY_NODE_SHOW_MNODES_STMT: + case QUERY_NODE_SHOW_MODULES_STMT: + case QUERY_NODE_SHOW_QNODES_STMT: + case QUERY_NODE_SHOW_SNODES_STMT: + case QUERY_NODE_SHOW_BNODES_STMT: + case QUERY_NODE_SHOW_CLUSTER_STMT: + case QUERY_NODE_SHOW_LICENCES_STMT: + case QUERY_NODE_SHOW_VGROUPS_STMT: + case QUERY_NODE_SHOW_VARIABLES_STMT: + case QUERY_NODE_SHOW_CREATE_DATABASE_STMT: + case QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT: + case QUERY_NODE_SHOW_VNODES_STMT: + case QUERY_NODE_SHOW_SCORES_STMT: + return !pCxt->pParseCxt->enableSysInfo ? TSDB_CODE_PAR_PERMISSION_DENIED : TSDB_CODE_SUCCESS; + case QUERY_NODE_SHOW_CREATE_TABLE_STMT: + case QUERY_NODE_SHOW_CREATE_STABLE_STMT: + return authShowCreateTable(pCxt, (SShowCreateTableStmt*)pStmt); default: break; } diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c index b7532173c8380e9628fa504c9ce476f4884967fc..049d1ef54526ba73cde02d82d3b1d8a6779286e6 100644 --- a/source/libs/parser/src/parInsert.c +++ b/source/libs/parser/src/parInsert.c @@ -73,6 +73,9 @@ typedef struct SInsertParseContext { SStmtCallback* pStmtCb; SParseMetaCache* pMetaCache; char sTableName[TSDB_TABLE_NAME_LEN]; + char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW]; + int64_t memElapsed; + int64_t parRowElapsed; } SInsertParseContext; typedef struct SInsertParseSyntaxCxt { @@ -122,6 +125,37 @@ static int32_t skipInsertInto(char** pSql, SMsgBuf* pMsg) { return TSDB_CODE_SUCCESS; } +static char* tableNameGetPosition(SToken* pToken, char target) { + bool inEscape = false; + bool inQuote = false; + char quotaStr = 0; + + for (uint32_t i = 0; i < pToken->n; ++i) { + if (*(pToken->z + i) == target && (!inEscape) && (!inQuote)) { + return pToken->z + i; + } + + if (*(pToken->z + i) == TS_ESCAPE_CHAR) { + if (!inQuote) { + inEscape = !inEscape; + } + } + + if (*(pToken->z + i) == '\'' || *(pToken->z + i) == '"') { + if (!inEscape) { + if (!inQuote) { + quotaStr = *(pToken->z + i); + inQuote = !inQuote; + } else if (quotaStr == *(pToken->z + i)) { + inQuote = !inQuote; + } + } + } + } + + return NULL; +} + static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, const char* dbName, SMsgBuf* pMsgBuf) { const char* msg1 = "name too long"; const char* msg2 = "invalid database name"; @@ -129,7 +163,7 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con const char* msg4 = "invalid table name"; int32_t code = TSDB_CODE_SUCCESS; - char* p = strnchr(pTableName->z, TS_PATH_DELIMITER[0], pTableName->n, true); + char* p = tableNameGetPosition(pTableName, TS_PATH_DELIMITER[0]); if (p != NULL) { // db has been specified in sql string so we ignore current db path assert(*p == TS_PATH_DELIMITER[0]); @@ -140,9 +174,9 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con } char name[TSDB_DB_FNAME_LEN] = {0}; strncpy(name, pTableName->z, dbLen); - dbLen = strdequote(name); + int32_t actualDbLen = strdequote(name); - code = tNameSetDbName(pName, acctId, name, dbLen); + code = tNameSetDbName(pName, acctId, name, actualDbLen); if (code != TSDB_CODE_SUCCESS) { return buildInvalidOperationMsg(pMsgBuf, msg1); } @@ -203,10 +237,11 @@ static int32_t checkAuth(SInsertParseContext* pCxt, char* pDbFname, bool* pPass) return catalogChkAuth(pBasicCtx->pCatalog, &conn, pBasicCtx->pUser, pDbFname, AUTH_TYPE_WRITE, pPass); } -static int32_t getTableSchema(SInsertParseContext* pCxt, SName* pTbName, bool isStb, STableMeta** pTableMeta) { +static int32_t getTableSchema(SInsertParseContext* pCxt, int32_t tbNo, SName* pTbName, bool isStb, + STableMeta** pTableMeta) { SParseContext* pBasicCtx = pCxt->pComCxt; if (pBasicCtx->async) { - return getTableMetaFromCache(pCxt->pMetaCache, pTbName, pTableMeta); + return getTableMetaFromCacheForInsert(pBasicCtx->pTableMetaPos, pCxt->pMetaCache, tbNo, pTableMeta); } SRequestConnInfo conn = {.pTrans = pBasicCtx->pTransporter, .requestId = pBasicCtx->requestId, @@ -219,10 +254,10 @@ static int32_t getTableSchema(SInsertParseContext* pCxt, SName* pTbName, bool is return catalogGetTableMeta(pBasicCtx->pCatalog, &conn, pTbName, pTableMeta); } -static int32_t getTableVgroup(SInsertParseContext* pCxt, SName* pTbName, SVgroupInfo* pVg) { +static int32_t getTableVgroup(SInsertParseContext* pCxt, int32_t tbNo, SName* pTbName, SVgroupInfo* pVg) { SParseContext* pBasicCtx = pCxt->pComCxt; if (pBasicCtx->async) { - return getTableVgroupFromCache(pCxt->pMetaCache, pTbName, pVg); + return getTableVgroupFromCacheForInsert(pBasicCtx->pTableVgroupPos, pCxt->pMetaCache, tbNo, pVg); } SRequestConnInfo conn = {.pTrans = pBasicCtx->pTransporter, .requestId = pBasicCtx->requestId, @@ -231,28 +266,22 @@ static int32_t getTableVgroup(SInsertParseContext* pCxt, SName* pTbName, SVgroup return catalogGetTableHashVgroup(pBasicCtx->pCatalog, &conn, pTbName, pVg); } -static int32_t getTableMetaImpl(SInsertParseContext* pCxt, SName* name, char* dbFname, bool isStb) { - bool pass = false; - CHECK_CODE(checkAuth(pCxt, dbFname, &pass)); - if (!pass) { - return TSDB_CODE_PAR_PERMISSION_DENIED; - } - - CHECK_CODE(getTableSchema(pCxt, name, isStb, &pCxt->pTableMeta)); +static int32_t getTableMetaImpl(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* dbFname, bool isStb) { + CHECK_CODE(getTableSchema(pCxt, tbNo, name, isStb, &pCxt->pTableMeta)); if (!isStb) { SVgroupInfo vg; - CHECK_CODE(getTableVgroup(pCxt, name, &vg)); + CHECK_CODE(getTableVgroup(pCxt, tbNo, name, &vg)); CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg))); } return TSDB_CODE_SUCCESS; } -static int32_t getTableMeta(SInsertParseContext* pCxt, SName* name, char* dbFname) { - return getTableMetaImpl(pCxt, name, dbFname, false); +static int32_t getTableMeta(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* dbFname) { + return getTableMetaImpl(pCxt, tbNo, name, dbFname, false); } -static int32_t getSTableMeta(SInsertParseContext* pCxt, SName* name, char* dbFname) { - return getTableMetaImpl(pCxt, name, dbFname, true); +static int32_t getSTableMeta(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* dbFname) { + return getTableMetaImpl(pCxt, tbNo, name, dbFname, true); } static int32_t getDBCfg(SInsertParseContext* pCxt, const char* pDbFName, SDbCfgInfo* pInfo) { @@ -473,6 +502,10 @@ static int32_t parseValueToken(char** end, SToken* pToken, SSchema* pSchema, int return func(pMsgBuf, NULL, 0, param); } + if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) { + return buildSyntaxErrMsg(pMsgBuf, "invalid numeric data", pToken->z); + } + switch (pSchema->type) { case TSDB_DATA_TYPE_BOOL: { if ((pToken->type == TK_NK_BOOL || pToken->type == TK_NK_STRING) && (pToken->n != 0)) { @@ -683,6 +716,11 @@ static int32_t parseBoundColumns(SInsertParseContext* pCxt, SParsedDataColInfo* break; } + char tmpTokenBuf[TSDB_COL_NAME_LEN + 2] = {0}; // used for deleting Escape character backstick(`) + strncpy(tmpTokenBuf, sToken.z, sToken.n); + sToken.z = tmpTokenBuf; + sToken.n = strdequote(sToken.z); + col_id_t t = lastColIdx + 1; col_id_t index = findCol(&sToken, t, nCols, pSchema); if (index < 0 && t > 0) { @@ -1028,13 +1066,13 @@ end: return code; } -static int32_t storeTableMeta(SInsertParseContext* pCxt, SHashObj* pHash, SName* pTableName, const char* pName, - int32_t len, STableMeta* pMeta) { +static int32_t storeTableMeta(SInsertParseContext* pCxt, SHashObj* pHash, int32_t tbNo, SName* pTableName, + const char* pName, int32_t len, STableMeta* pMeta) { SVgroupInfo vg; - CHECK_CODE(getTableVgroup(pCxt, pTableName, &vg)); + CHECK_CODE(getTableVgroup(pCxt, tbNo, pTableName, &vg)); CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg))); - pMeta->uid = 0; + pMeta->uid = tbNo; pMeta->vgId = vg.vgId; pMeta->tableType = TSDB_CHILD_TABLE; @@ -1084,7 +1122,7 @@ static int32_t ignoreAutoCreateTableClause(SInsertParseContext* pCxt) { } // pSql -> stb_name [(tag1_name, ...)] TAGS (tag1_value, ...) -static int32_t parseUsingClause(SInsertParseContext* pCxt, SName* name, char* tbFName) { +static int32_t parseUsingClause(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* tbFName) { int32_t len = strlen(tbFName); STableMeta** pMeta = taosHashGet(pCxt->pSubTableHashObj, tbFName, len); if (NULL != pMeta) { @@ -1102,11 +1140,11 @@ static int32_t parseUsingClause(SInsertParseContext* pCxt, SName* name, char* tb tNameGetFullDbName(&sname, dbFName); strcpy(pCxt->sTableName, sname.tname); - CHECK_CODE(getSTableMeta(pCxt, &sname, dbFName)); + CHECK_CODE(getSTableMeta(pCxt, tbNo, &sname, dbFName)); if (TSDB_SUPER_TABLE != pCxt->pTableMeta->tableType) { return buildInvalidOperationMsg(&pCxt->msg, "create table only from super table is allowed"); } - CHECK_CODE(storeTableMeta(pCxt, pCxt->pSubTableHashObj, name, tbFName, len, pCxt->pTableMeta)); + CHECK_CODE(storeTableMeta(pCxt, pCxt->pSubTableHashObj, tbNo, name, tbFName, len, pCxt->pTableMeta)); SSchema* pTagsSchema = getTableTagSchema(pCxt->pTableMeta); setBoundColumnInfo(&pCxt->tags, pTagsSchema, getNumOfTags(pCxt->pTableMeta)); @@ -1195,7 +1233,7 @@ static int parseOneRow(SInsertParseContext* pCxt, STableDataBlocks* pDataBlocks, tdSRowEnd(pBuilder); *gotRow = true; - + #ifdef TD_DEBUG_PRINT_ROW STSchema* pSTSchema = tdGetSTSChemaFromSSChema(schema, spd->numOfCols, 1); tdSRowPrint(row, pSTSchema, __func__); @@ -1214,7 +1252,7 @@ static int32_t parseValues(SInsertParseContext* pCxt, STableDataBlocks* pDataBlo CHECK_CODE(initRowBuilder(&pDataBlock->rowBuilder, pDataBlock->pTableMeta->sversion, &pDataBlock->boundColumnInfo)); (*numOfRows) = 0; - char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting Escape character: \\, \', \" + // char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting Escape character: \\, \', \" SToken sToken; while (1) { int32_t index = 0; @@ -1232,7 +1270,7 @@ static int32_t parseValues(SInsertParseContext* pCxt, STableDataBlocks* pDataBlo } bool gotRow = false; - CHECK_CODE(parseOneRow(pCxt, pDataBlock, tinfo.precision, &gotRow, tmpTokenBuf)); + CHECK_CODE(parseOneRow(pCxt, pDataBlock, tinfo.precision, &gotRow, pCxt->tmpTokenBuf)); if (gotRow) { pDataBlock->size += extendedRowSize; // len; } @@ -1347,7 +1385,9 @@ static int32_t parseDataFromFile(SInsertParseContext* pCxt, SToken filePath, STa } static void destroyInsertParseContextForTable(SInsertParseContext* pCxt) { - taosMemoryFreeClear(pCxt->pTableMeta); + if (!pCxt->pComCxt->async) { + taosMemoryFreeClear(pCxt->pTableMeta); + } destroyBoundColumnInfo(&pCxt->tags); tdDestroySVCreateTbReq(&pCxt->createTblReq); } @@ -1365,6 +1405,20 @@ static void destroyInsertParseContext(SInsertParseContext* pCxt) { destroyBlockArrayList(pCxt->pVgDataBlocks); } +static int32_t parseTableName(SInsertParseContext* pCxt, SToken* pTbnameToken, SName* pName, char* pDbFName, + char* pTbFName) { + int32_t code = createSName(pName, pTbnameToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg); + if (TSDB_CODE_SUCCESS == code) { + tNameExtractFullName(pName, pTbFName); + code = taosHashPut(pCxt->pTableNameHashObj, pTbFName, strlen(pTbFName), pName, sizeof(SName)); + } + if (TSDB_CODE_SUCCESS == code) { + tNameGetFullDbName(pName, pDbFName); + code = taosHashPut(pCxt->pDbFNameHashObj, pDbFName, strlen(pDbFName), pDbFName, TSDB_DB_FNAME_LEN); + } + return code; +} + // tb_name // [USING stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)] // [(field1_name, ...)] @@ -1372,7 +1426,9 @@ static void destroyInsertParseContext(SInsertParseContext* pCxt) { // [...]; static int32_t parseInsertBody(SInsertParseContext* pCxt) { int32_t tbNum = 0; + SName name; char tbFName[TSDB_TABLE_FNAME_LEN]; + char dbFName[TSDB_DB_FNAME_LEN]; bool autoCreateTbl = false; // for each table @@ -1415,20 +1471,15 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) { SToken tbnameToken = sToken; NEXT_TOKEN(pCxt->pSql, sToken); - SName name; - CHECK_CODE(createSName(&name, &tbnameToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg)); - - tNameExtractFullName(&name, tbFName); - CHECK_CODE(taosHashPut(pCxt->pTableNameHashObj, tbFName, strlen(tbFName), &name, sizeof(SName))); - char dbFName[TSDB_DB_FNAME_LEN]; - tNameGetFullDbName(&name, dbFName); - CHECK_CODE(taosHashPut(pCxt->pDbFNameHashObj, dbFName, strlen(dbFName), dbFName, sizeof(dbFName))); + if (!pCxt->pComCxt->async || TK_USING == sToken.type) { + CHECK_CODE(parseTableName(pCxt, &tbnameToken, &name, dbFName, tbFName)); + } bool existedUsing = false; // USING clause if (TK_USING == sToken.type) { existedUsing = true; - CHECK_CODE(parseUsingClause(pCxt, &name, tbFName)); + CHECK_CODE(parseUsingClause(pCxt, tbNum, &name, tbFName)); NEXT_TOKEN(pCxt->pSql, sToken); autoCreateTbl = true; } @@ -1438,22 +1489,31 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) { // pSql -> field1_name, ...) pBoundColsStart = pCxt->pSql; CHECK_CODE(ignoreBoundColumns(pCxt)); - // CHECK_CODE(parseBoundColumns(pCxt, &dataBuf->boundColumnInfo, getTableColumnSchema(pCxt->pTableMeta))); NEXT_TOKEN(pCxt->pSql, sToken); } if (TK_USING == sToken.type) { - CHECK_CODE(parseUsingClause(pCxt, &name, tbFName)); + if (pCxt->pComCxt->async) { + CHECK_CODE(parseTableName(pCxt, &tbnameToken, &name, dbFName, tbFName)); + } + CHECK_CODE(parseUsingClause(pCxt, tbNum, &name, tbFName)); NEXT_TOKEN(pCxt->pSql, sToken); autoCreateTbl = true; } else if (!existedUsing) { - CHECK_CODE(getTableMeta(pCxt, &name, dbFName)); + CHECK_CODE(getTableMeta(pCxt, tbNum, &name, dbFName)); } STableDataBlocks* dataBuf = NULL; - CHECK_CODE(getDataBlockFromList(pCxt->pTableBlockHashObj, tbFName, strlen(tbFName), TSDB_DEFAULT_PAYLOAD_SIZE, - sizeof(SSubmitBlk), getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta, - &dataBuf, NULL, &pCxt->createTblReq)); + if (pCxt->pComCxt->async) { + CHECK_CODE(getDataBlockFromList(pCxt->pTableBlockHashObj, &pCxt->pTableMeta->uid, sizeof(pCxt->pTableMeta->uid), + TSDB_DEFAULT_PAYLOAD_SIZE, sizeof(SSubmitBlk), + getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta, &dataBuf, NULL, + &pCxt->createTblReq)); + } else { + CHECK_CODE(getDataBlockFromList(pCxt->pTableBlockHashObj, tbFName, strlen(tbFName), TSDB_DEFAULT_PAYLOAD_SIZE, + sizeof(SSubmitBlk), getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta, + &dataBuf, NULL, &pCxt->createTblReq)); + } if (NULL != pBoundColsStart) { char* pCurrPos = pCxt->pSql; @@ -1532,7 +1592,9 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache .totalNum = 0, .pOutput = (SVnodeModifOpStmt*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT), .pStmtCb = pContext->pStmtCb, - .pMetaCache = pMetaCache}; + .pMetaCache = pMetaCache, + .memElapsed = 0, + .parRowElapsed = 0}; if (pContext->pStmtCb && *pQuery) { (*pContext->pStmtCb->getExecInfoFn)(pContext->pStmtCb->pStmt, &context.pVgroupsHashObj, @@ -1547,7 +1609,7 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache } else { context.pVgroupsHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); context.pTableBlockHashObj = - taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); } if (NULL == context.pVgroupsHashObj || NULL == context.pTableBlockHashObj || NULL == context.pSubTableHashObj || @@ -1607,6 +1669,10 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache pDb = taosHashIterate(context.pDbFNameHashObj, pDb); } } + if (pContext->pStmtCb) { + context.pVgroupsHashObj = NULL; + context.pTableBlockHashObj = NULL; + } destroyInsertParseContext(&context); return code; } @@ -1656,24 +1722,32 @@ static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt) { return TSDB_CODE_SUCCESS; } -static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, SToken* pTbToken) { - SName name; +static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, bool isStable, int32_t tableNo, SToken* pTbToken) { + SName name = {0}; CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg)); - CHECK_CODE(reserveUserAuthInCacheExt(pCxt->pComCxt->pUser, &name, AUTH_TYPE_WRITE, pCxt->pMetaCache)); - CHECK_CODE(reserveTableMetaInCacheExt(&name, pCxt->pMetaCache)); - CHECK_CODE(reserveTableVgroupInCacheExt(&name, pCxt->pMetaCache)); + CHECK_CODE(reserveTableMetaInCacheForInsert(&name, isStable ? CATALOG_REQ_TYPE_META : CATALOG_REQ_TYPE_BOTH, tableNo, + pCxt->pMetaCache)); + return TSDB_CODE_SUCCESS; +} + +static int32_t checkTableName(const char* pTableName, SMsgBuf* pMsgBuf) { + if (NULL != strchr(pTableName, '.')) { + return generateSyntaxErrMsgExt(pMsgBuf, TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME, "The table name cannot contain '.'"); + } return TSDB_CODE_SUCCESS; } -static int32_t collectAutoCreateTableMetaKey(SInsertParseSyntaxCxt* pCxt, SToken* pTbToken) { - SName name; +static int32_t collectAutoCreateTableMetaKey(SInsertParseSyntaxCxt* pCxt, int32_t tableNo, SToken* pTbToken) { + SName name = {0}; CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg)); - CHECK_CODE(reserveTableVgroupInCacheExt(&name, pCxt->pMetaCache)); + CHECK_CODE(checkTableName(name.tname, &pCxt->msg)); + CHECK_CODE(reserveTableMetaInCacheForInsert(&name, CATALOG_REQ_TYPE_VGROUP, tableNo, pCxt->pMetaCache)); return TSDB_CODE_SUCCESS; } static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) { - bool hasData = false; + bool hasData = false; + int32_t tableNo = 0; // for each table while (1) { SToken sToken; @@ -1702,9 +1776,9 @@ static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) { // USING clause if (TK_USING == sToken.type) { existedUsing = true; - CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, &tbnameToken)); + CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, tableNo, &tbnameToken)); NEXT_TOKEN(pCxt->pSql, sToken); - CHECK_CODE(collectTableMetaKey(pCxt, &sToken)); + CHECK_CODE(collectTableMetaKey(pCxt, true, tableNo, &sToken)); CHECK_CODE(skipUsingClause(pCxt)); NEXT_TOKEN(pCxt->pSql, sToken); } @@ -1717,15 +1791,17 @@ static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) { if (TK_USING == sToken.type && !existedUsing) { existedUsing = true; - CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, &tbnameToken)); + CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, tableNo, &tbnameToken)); NEXT_TOKEN(pCxt->pSql, sToken); - CHECK_CODE(collectTableMetaKey(pCxt, &sToken)); + CHECK_CODE(collectTableMetaKey(pCxt, true, tableNo, &sToken)); CHECK_CODE(skipUsingClause(pCxt)); NEXT_TOKEN(pCxt->pSql, sToken); - } else { - CHECK_CODE(collectTableMetaKey(pCxt, &tbnameToken)); + } else if (!existedUsing) { + CHECK_CODE(collectTableMetaKey(pCxt, false, tableNo, &tbnameToken)); } + ++tableNo; + if (TK_VALUES == sToken.type) { // pSql -> (field1_value, ...) [(field1_value2, ...) ...] CHECK_CODE(skipValuesClause(pCxt)); diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index ef985a38944288e156fa370384272197421a8920..a857d575f1313b06af0f4c55b44a8d0fb2a8e867 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -142,8 +142,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { }, { .showType = QUERY_NODE_SHOW_STREAMS_STMT, - .pDbName = TSDB_PERFORMANCE_SCHEMA_DB, - .pTableName = TSDB_PERFS_TABLE_STREAMS, + .pDbName = TSDB_INFORMATION_SCHEMA_DB, + .pTableName = TSDB_INS_TABLE_STREAMS, .numOfShowCols = 1, .pShowCols = {"stream_name"} }, @@ -184,8 +184,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { }, { .showType = QUERY_NODE_SHOW_TOPICS_STMT, - .pDbName = TSDB_PERFORMANCE_SCHEMA_DB, - .pTableName = TSDB_PERFS_TABLE_TOPICS, + .pDbName = TSDB_INFORMATION_SCHEMA_DB, + .pTableName = TSDB_INS_TABLE_TOPICS, .numOfShowCols = 1, .pShowCols = {"topic_name"} }, @@ -240,8 +240,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { }, { .showType = QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT, - .pDbName = TSDB_PERFORMANCE_SCHEMA_DB, - .pTableName = TSDB_PERFS_TABLE_SUBSCRIPTIONS, + .pDbName = TSDB_INFORMATION_SCHEMA_DB, + .pTableName = TSDB_INS_TABLE_SUBSCRIPTIONS, .numOfShowCols = 1, .pShowCols = {"*"} }, @@ -784,6 +784,9 @@ static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* p int32_t nums = pMeta->tableInfo.numOfColumns + (igTags ? 0 : ((TSDB_SUPER_TABLE == pMeta->tableType) ? pMeta->tableInfo.numOfTags : 0)); for (int32_t i = 0; i < nums; ++i) { + if (invisibleColumn(pCxt->pParseCxt->enableSysInfo, pMeta->tableType, pMeta->schema[i].flags)) { + continue; + } SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); if (NULL == pCol) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_OUT_OF_MEMORY); @@ -826,7 +829,8 @@ static int32_t findAndSetColumn(STranslateContext* pCxt, SColumnNode** pColRef, } int32_t nums = pMeta->tableInfo.numOfTags + pMeta->tableInfo.numOfColumns; for (int32_t i = 0; i < nums; ++i) { - if (0 == strcmp(pCol->colName, pMeta->schema[i].name)) { + if (0 == strcmp(pCol->colName, pMeta->schema[i].name) && + !invisibleColumn(pCxt->pParseCxt->enableSysInfo, pMeta->tableType, pMeta->schema[i].flags)) { setColumnInfoBySchema((SRealTableNode*)pTable, pMeta->schema + i, (i - pMeta->tableInfo.numOfColumns), pCol); *pFound = true; break; @@ -1399,7 +1403,7 @@ static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFu "%s function must be used in select statements", pFunc->functionName); } SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; - if (QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) && + if (NULL != pSelect->pFromTable && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) && !isTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, "%s function requires valid time series input", pFunc->functionName); @@ -1881,6 +1885,12 @@ static EDealRes doCheckExprForGroupBy(SNode** pNode, void* pContext) { return rewriteExprToGroupKeyFunc(pCxt, pNode); } } + if (NULL != pSelect->pWindow && QUERY_NODE_STATE_WINDOW == nodeType(pSelect->pWindow)) { + if (nodesEqualNode(((SStateWindowNode*)pSelect->pWindow)->pExpr, *pNode)) { + pSelect->hasStateKey = true; + return rewriteExprToGroupKeyFunc(pCxt, pNode); + } + } if (isScanPseudoColumnFunc(*pNode) || QUERY_NODE_COLUMN == nodeType(*pNode)) { if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc) { return generateDealNodeErrMsg(pCxt, getGroupByErrorCode(pCxt)); @@ -1973,7 +1983,7 @@ static int32_t checkWindowFuncCoexist(STranslateContext* pCxt, SSelectStmt* pSel if (NULL == pSelect->pWindow) { return TSDB_CODE_SUCCESS; } - if (NULL != pSelect->pWindow && !pSelect->hasAggFuncs) { + if (NULL != pSelect->pWindow && !pSelect->hasAggFuncs && !pSelect->hasStateKey) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NO_VALID_FUNC_IN_WIN); } return TSDB_CODE_SUCCESS; @@ -2037,16 +2047,13 @@ static int32_t setVnodeSysTableVgroupList(STranslateContext* pCxt, SName* pName, code = getDBVgInfoImpl(pCxt, pName, &vgroupList); } - if (TSDB_CODE_SUCCESS == code && - 0 == strcmp(pRealTable->table.dbName, TSDB_INFORMATION_SCHEMA_DB) && - 0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TAGS) && - isSelectStmt(pCxt->pCurrStmt) && + if (TSDB_CODE_SUCCESS == code && 0 == strcmp(pRealTable->table.dbName, TSDB_INFORMATION_SCHEMA_DB) && + 0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TAGS) && isSelectStmt(pCxt->pCurrStmt) && 0 == taosArrayGetSize(vgroupList)) { ((SSelectStmt*)pCxt->pCurrStmt)->isEmptyResult = true; } - if (TSDB_CODE_SUCCESS == code && - 0 == strcmp(pRealTable->table.dbName, TSDB_INFORMATION_SCHEMA_DB) && + if (TSDB_CODE_SUCCESS == code && 0 == strcmp(pRealTable->table.dbName, TSDB_INFORMATION_SCHEMA_DB) && 0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TABLES)) { code = addMnodeToVgroupList(&pCxt->pParseCxt->mgmtEpSet, &vgroupList); } @@ -2153,15 +2160,16 @@ static int32_t setTableIndex(STranslateContext* pCxt, SName* pName, SRealTableNo return TSDB_CODE_SUCCESS; } -static int32_t setTableCacheLastMode(STranslateContext* pCxt, SName* pName, SRealTableNode* pRealTable) { - if (TSDB_SYSTEM_TABLE == pRealTable->pMeta->tableType) { +static int32_t setTableCacheLastMode(STranslateContext* pCxt, SSelectStmt* pSelect) { + if (!pSelect->hasLastRowFunc || QUERY_NODE_REAL_TABLE != nodeType(pSelect->pFromTable)) { return TSDB_CODE_SUCCESS; } - SDbCfgInfo dbCfg = {0}; - int32_t code = getDBCfg(pCxt, pRealTable->table.dbName, &dbCfg); + SRealTableNode* pTable = (SRealTableNode*)pSelect->pFromTable; + SDbCfgInfo dbCfg = {0}; + int32_t code = getDBCfg(pCxt, pTable->table.dbName, &dbCfg); if (TSDB_CODE_SUCCESS == code) { - pRealTable->cacheLastMode = dbCfg.cacheLast; + pTable->cacheLastMode = dbCfg.cacheLast; } return code; } @@ -2185,18 +2193,15 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) { if (TSDB_CODE_SUCCESS == code) { code = setTableIndex(pCxt, &name, pRealTable); } - if (TSDB_CODE_SUCCESS == code) { - code = setTableCacheLastMode(pCxt, &name, pRealTable); - } } - pRealTable->table.precision = pRealTable->pMeta->tableInfo.precision; - pRealTable->table.singleTable = isSingleTable(pRealTable); if (TSDB_CODE_SUCCESS == code) { + pRealTable->table.precision = pRealTable->pMeta->tableInfo.precision; + pRealTable->table.singleTable = isSingleTable(pRealTable); + if (TSDB_SUPER_TABLE == pRealTable->pMeta->tableType) { + pCxt->stableQuery = true; + } code = addNamespace(pCxt, pRealTable); } - if (TSDB_SUPER_TABLE == pRealTable->pMeta->tableType) { - pCxt->stableQuery = true; - } break; } case QUERY_NODE_TEMP_TABLE: { @@ -2266,10 +2271,14 @@ static SNode* createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr) { if (QUERY_NODE_COLUMN == nodeType(pExpr)) { SColumnNode* pCol = (SColumnNode*)pExpr; len = snprintf(buf, sizeof(buf), "%s(%s.%s)", pSrcFunc->functionName, pCol->tableAlias, pCol->colName); + strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1)); + len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pCol->colName); + strncpy(pFunc->node.userAlias, buf, TMIN(len, sizeof(pFunc->node.userAlias) - 1)); } else { len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pExpr->aliasName); + strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1)); + strncpy(pFunc->node.userAlias, buf, TMIN(len, sizeof(pFunc->node.userAlias) - 1)); } - strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1)); return (SNode*)pFunc; } @@ -2468,13 +2477,65 @@ static int32_t translateOrderBy(STranslateContext* pCxt, SSelectStmt* pSelect) { return code; } +static EDealRes needFillImpl(SNode* pNode, void* pContext) { + if (isAggFunc(pNode) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType) { + *(bool*)pContext = true; + return DEAL_RES_END; + } + return DEAL_RES_CONTINUE; +} + +static bool needFill(SNode* pNode) { + bool hasFillFunc = false; + nodesWalkExpr(pNode, needFillImpl, &hasFillFunc); + return hasFillFunc; +} + +static bool mismatchFillDataType(SDataType origDt, SDataType fillDt) { + if (TSDB_DATA_TYPE_NULL == fillDt.type) { + return false; + } + if (IS_NUMERIC_TYPE(origDt.type) && !IS_NUMERIC_TYPE(fillDt.type)) { + return true; + } + if (IS_VAR_DATA_TYPE(origDt.type) && !IS_VAR_DATA_TYPE(fillDt.type)) { + return true; + } + return false; +} + +static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeList* pProjectionList) { + if (FILL_MODE_VALUE != pFill->mode) { + return TSDB_CODE_SUCCESS; + } + + int32_t fillNo = 0; + SNodeListNode* pFillValues = (SNodeListNode*)pFill->pValues; + SNode* pProject = NULL; + FOREACH(pProject, pProjectionList) { + if (needFill(pProject)) { + if (fillNo >= LIST_LENGTH(pFillValues->pNodeList)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch"); + } + if (mismatchFillDataType(((SExprNode*)pProject)->resType, + ((SExprNode*)nodesListGetNode(pFillValues->pNodeList, fillNo))->resType)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch"); + } + ++fillNo; + } + } + if (fillNo != LIST_LENGTH(pFillValues->pNodeList)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch"); + } + return TSDB_CODE_SUCCESS; +} + static int32_t translateFillValues(STranslateContext* pCxt, SSelectStmt* pSelect) { if (NULL == pSelect->pWindow || QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow) || NULL == ((SIntervalWindowNode*)pSelect->pWindow)->pFill) { return TSDB_CODE_SUCCESS; } - SFillNode* pFill = (SFillNode*)((SIntervalWindowNode*)pSelect->pWindow)->pFill; - return TSDB_CODE_SUCCESS; + return checkFillValues(pCxt, (SFillNode*)((SIntervalWindowNode*)pSelect->pWindow)->pFill, pSelect->pProjectionList); } static int32_t rewriteProjectAlias(SNodeList* pProjectionList) { @@ -2591,8 +2652,12 @@ static int32_t getQueryTimeRange(STranslateContext* pCxt, SNode* pWhere, STimeWi return code; } -static int32_t checkFill(STranslateContext* pCxt, SFillNode* pFill, SValueNode* pInterval) { +static int32_t checkFill(STranslateContext* pCxt, SFillNode* pFill, SValueNode* pInterval, bool isInterpFill) { if (FILL_MODE_NONE == pFill->mode) { + if (isInterpFill) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Unsupported fill type"); + } + return TSDB_CODE_SUCCESS; } @@ -2632,7 +2697,7 @@ static int32_t translateFill(STranslateContext* pCxt, SSelectStmt* pSelect, SInt } ((SFillNode*)pInterval->pFill)->timeRange = pSelect->timeRange; - return checkFill(pCxt, (SFillNode*)pInterval->pFill, (SValueNode*)pInterval->pInterval); + return checkFill(pCxt, (SFillNode*)pInterval->pFill, (SValueNode*)pInterval->pInterval, false); } static int64_t getMonthsFromTimeVal(int64_t val, int32_t fromPrecision, char unit) { @@ -2825,6 +2890,29 @@ static int32_t createDefaultFillNode(STranslateContext* pCxt, SNode** pOutput) { return TSDB_CODE_SUCCESS; } +static int32_t checkEvery(STranslateContext* pCxt, SValueNode* pInterval) { + int32_t len = strlen(pInterval->literal); + + char* unit = &pInterval->literal[len - 1]; + if (*unit == 'n' || *unit == 'y') { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, + "Unsupported time unit in EVERY clause"); + } + + return TSDB_CODE_SUCCESS; +} + +static int32_t translateInterpEvery(STranslateContext* pCxt, SNode** pEvery) { + int32_t code = TSDB_CODE_SUCCESS; + + code = checkEvery(pCxt, (SValueNode*)(*pEvery)); + if (TSDB_CODE_SUCCESS == code) { + code = translateExpr(pCxt, pEvery); + } + + return code; +} + static int32_t translateInterpFill(STranslateContext* pCxt, SSelectStmt* pSelect) { int32_t code = TSDB_CODE_SUCCESS; @@ -2838,7 +2926,7 @@ static int32_t translateInterpFill(STranslateContext* pCxt, SSelectStmt* pSelect code = getQueryTimeRange(pCxt, pSelect->pRange, &(((SFillNode*)pSelect->pFill)->timeRange)); } if (TSDB_CODE_SUCCESS == code) { - code = checkFill(pCxt, (SFillNode*)pSelect->pFill, (SValueNode*)pSelect->pEvery); + code = checkFill(pCxt, (SFillNode*)pSelect->pFill, (SValueNode*)pSelect->pEvery, true); } return code; @@ -2859,7 +2947,7 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) { int32_t code = translateExpr(pCxt, &pSelect->pRange); if (TSDB_CODE_SUCCESS == code) { - code = translateExpr(pCxt, &pSelect->pEvery); + code = translateInterpEvery(pCxt, &pSelect->pEvery); } if (TSDB_CODE_SUCCESS == code) { code = translateInterpFill(pCxt, pSelect); @@ -3054,6 +3142,9 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect if (TSDB_CODE_SUCCESS == code) { code = replaceOrderByAliasForSelect(pCxt, pSelect); } + if (TSDB_CODE_SUCCESS == code) { + code = setTableCacheLastMode(pCxt, pSelect); + } return code; } @@ -4967,7 +5058,7 @@ static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pSt return TSDB_CODE_SUCCESS; } - if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery) || + if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery) || NULL == ((SSelectStmt*)pStmt->pQuery)->pFromTable || QUERY_NODE_REAL_TABLE != nodeType(((SSelectStmt*)pStmt->pQuery)->pFromTable)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query"); } diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index ae5a281aab92ab5e365fe19e1769d95b2b43ea47..32513fd0b6f56097b2b7f08ae03725ce39498a37 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -476,9 +476,11 @@ static int32_t buildDbReq(SHashObj* pDbsHash, SArray** pDbs) { static int32_t buildTableReqFromDb(SHashObj* pDbsHash, SArray** pDbs) { if (NULL != pDbsHash) { - *pDbs = taosArrayInit(taosHashGetSize(pDbsHash), sizeof(STablesReq)); if (NULL == *pDbs) { - return TSDB_CODE_OUT_OF_MEMORY; + *pDbs = taosArrayInit(taosHashGetSize(pDbsHash), sizeof(STablesReq)); + if (NULL == *pDbs) { + return TSDB_CODE_OUT_OF_MEMORY; + } } SParseTablesMetaReq* p = taosHashIterate(pDbsHash, NULL); while (NULL != p) { @@ -530,7 +532,62 @@ static int32_t buildUdfReq(SHashObj* pUdfHash, SArray** pUdf) { return TSDB_CODE_SUCCESS; } -int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) { +static int32_t buildCatalogReqForInsert(SParseContext* pCxt, const SParseMetaCache* pMetaCache, + SCatalogReq* pCatalogReq) { + int32_t ndbs = taosHashGetSize(pMetaCache->pInsertTables); + pCatalogReq->pTableMeta = taosArrayInit(ndbs, sizeof(STablesReq)); + if (NULL == pCatalogReq->pTableMeta) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pCatalogReq->pTableHash = taosArrayInit(ndbs, sizeof(STablesReq)); + if (NULL == pCatalogReq->pTableHash) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pCatalogReq->pUser = taosArrayInit(ndbs, sizeof(SUserAuthInfo)); + if (NULL == pCatalogReq->pUser) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + pCxt->pTableMetaPos = taosArrayInit(pMetaCache->sqlTableNum, sizeof(int32_t)); + pCxt->pTableVgroupPos = taosArrayInit(pMetaCache->sqlTableNum, sizeof(int32_t)); + + int32_t metaReqNo = 0; + int32_t vgroupReqNo = 0; + SInsertTablesMetaReq* p = taosHashIterate(pMetaCache->pInsertTables, NULL); + while (NULL != p) { + STablesReq req = {0}; + strcpy(req.dbFName, p->dbFName); + TSWAP(req.pTables, p->pTableMetaReq); + taosArrayPush(pCatalogReq->pTableMeta, &req); + + req.pTables = NULL; + TSWAP(req.pTables, p->pTableVgroupReq); + taosArrayPush(pCatalogReq->pTableHash, &req); + + int32_t ntables = taosArrayGetSize(p->pTableMetaPos); + for (int32_t i = 0; i < ntables; ++i) { + taosArrayInsert(pCxt->pTableMetaPos, *(int32_t*)taosArrayGet(p->pTableMetaPos, i), &metaReqNo); + ++metaReqNo; + } + + ntables = taosArrayGetSize(p->pTableVgroupPos); + for (int32_t i = 0; i < ntables; ++i) { + taosArrayInsert(pCxt->pTableVgroupPos, *(int32_t*)taosArrayGet(p->pTableVgroupPos, i), &vgroupReqNo); + ++vgroupReqNo; + } + + SUserAuthInfo auth = {0}; + strcpy(auth.user, pCxt->pUser); + strcpy(auth.dbFName, p->dbFName); + auth.type = AUTH_TYPE_WRITE; + taosArrayPush(pCatalogReq->pUser, &auth); + + p = taosHashIterate(pMetaCache->pInsertTables, p); + } + return TSDB_CODE_SUCCESS; +} + +int32_t buildCatalogReqForQuery(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) { int32_t code = buildTableReqFromDb(pMetaCache->pTableMeta, &pCatalogReq->pTableMeta); if (TSDB_CODE_SUCCESS == code) { code = buildDbReq(pMetaCache->pDbVgroup, &pCatalogReq->pDbVgroup); @@ -560,6 +617,13 @@ int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalog return code; } +int32_t buildCatalogReq(SParseContext* pCxt, const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) { + if (NULL != pMetaCache->pInsertTables) { + return buildCatalogReqForInsert(pCxt, pMetaCache, pCatalogReq); + } + return buildCatalogReqForQuery(pMetaCache, pCatalogReq); +} + static int32_t putMetaDataToHash(const char* pKey, int32_t len, const SArray* pData, int32_t index, SHashObj** pHash) { if (NULL == *pHash) { *pHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); @@ -647,7 +711,8 @@ static int32_t putUdfToCache(const SArray* pUdfReq, const SArray* pUdfData, SHas return TSDB_CODE_SUCCESS; } -int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) { +int32_t putMetaDataToCacheForQuery(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, + SParseMetaCache* pMetaCache) { int32_t code = putDbTableDataToCache(pCatalogReq->pTableMeta, pMetaData->pTableMeta, &pMetaCache->pTableMeta); if (TSDB_CODE_SUCCESS == code) { code = putDbDataToCache(pCatalogReq->pDbVgroup, pMetaData->pDbVgroup, &pMetaCache->pDbVgroup); @@ -677,6 +742,30 @@ int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMet return code; } +int32_t putMetaDataToCacheForInsert(const SMetaData* pMetaData, SParseMetaCache* pMetaCache) { + int32_t ndbs = taosArrayGetSize(pMetaData->pUser); + for (int32_t i = 0; i < ndbs; ++i) { + SMetaRes* pRes = taosArrayGet(pMetaData->pUser, i); + if (TSDB_CODE_SUCCESS != pRes->code) { + return pRes->code; + } + if (!(*(bool*)pRes->pRes)) { + return TSDB_CODE_PAR_PERMISSION_DENIED; + } + } + pMetaCache->pTableMetaData = pMetaData->pTableMeta; + pMetaCache->pTableVgroupData = pMetaData->pTableHash; + return TSDB_CODE_SUCCESS; +} + +int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache, + bool insertValuesStmt) { + if (insertValuesStmt) { + return putMetaDataToCacheForInsert(pMetaData, pMetaCache); + } + return putMetaDataToCacheForQuery(pCatalogReq, pMetaData, pMetaCache); +} + static int32_t reserveTableReqInCacheImpl(const char* pTbFName, int32_t len, SHashObj** pTables) { if (NULL == *pTables) { *pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); @@ -977,6 +1066,82 @@ int32_t getDnodeListFromCache(SParseMetaCache* pMetaCache, SArray** pDnodes) { return TSDB_CODE_SUCCESS; } +static int32_t reserveTableReqInCacheForInsert(const SName* pName, ECatalogReqType reqType, int32_t tableNo, + SInsertTablesMetaReq* pReq) { + switch (reqType) { + case CATALOG_REQ_TYPE_META: + taosArrayPush(pReq->pTableMetaReq, pName); + taosArrayPush(pReq->pTableMetaPos, &tableNo); + break; + case CATALOG_REQ_TYPE_VGROUP: + taosArrayPush(pReq->pTableVgroupReq, pName); + taosArrayPush(pReq->pTableVgroupPos, &tableNo); + break; + case CATALOG_REQ_TYPE_BOTH: + taosArrayPush(pReq->pTableMetaReq, pName); + taosArrayPush(pReq->pTableMetaPos, &tableNo); + taosArrayPush(pReq->pTableVgroupReq, pName); + taosArrayPush(pReq->pTableVgroupPos, &tableNo); + break; + default: + break; + } + return TSDB_CODE_SUCCESS; +} + +static int32_t reserveTableReqInDbCacheForInsert(const SName* pName, ECatalogReqType reqType, int32_t tableNo, + SHashObj* pDbs) { + SInsertTablesMetaReq req = {.pTableMetaReq = taosArrayInit(4, sizeof(SName)), + .pTableMetaPos = taosArrayInit(4, sizeof(int32_t)), + .pTableVgroupReq = taosArrayInit(4, sizeof(SName)), + .pTableVgroupPos = taosArrayInit(4, sizeof(int32_t))}; + tNameGetFullDbName(pName, req.dbFName); + int32_t code = reserveTableReqInCacheForInsert(pName, reqType, tableNo, &req); + if (TSDB_CODE_SUCCESS == code) { + code = taosHashPut(pDbs, pName->dbname, strlen(pName->dbname), &req, sizeof(SInsertTablesMetaReq)); + } + return code; +} + +int32_t reserveTableMetaInCacheForInsert(const SName* pName, ECatalogReqType reqType, int32_t tableNo, + SParseMetaCache* pMetaCache) { + if (NULL == pMetaCache->pInsertTables) { + pMetaCache->pInsertTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + if (NULL == pMetaCache->pInsertTables) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + pMetaCache->sqlTableNum = tableNo; + SInsertTablesMetaReq* pReq = taosHashGet(pMetaCache->pInsertTables, pName->dbname, strlen(pName->dbname)); + if (NULL == pReq) { + return reserveTableReqInDbCacheForInsert(pName, reqType, tableNo, pMetaCache->pInsertTables); + } + return reserveTableReqInCacheForInsert(pName, reqType, tableNo, pReq); +} + +int32_t getTableMetaFromCacheForInsert(SArray* pTableMetaPos, SParseMetaCache* pMetaCache, int32_t tableNo, + STableMeta** pMeta) { + int32_t reqIndex = *(int32_t*)taosArrayGet(pTableMetaPos, tableNo); + SMetaRes* pRes = taosArrayGet(pMetaCache->pTableMetaData, reqIndex); + if (TSDB_CODE_SUCCESS == pRes->code) { + *pMeta = pRes->pRes; + if (NULL == *pMeta) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return pRes->code; +} + +int32_t getTableVgroupFromCacheForInsert(SArray* pTableVgroupPos, SParseMetaCache* pMetaCache, int32_t tableNo, + SVgroupInfo* pVgroup) { + int32_t reqIndex = *(int32_t*)taosArrayGet(pTableVgroupPos, tableNo); + SMetaRes* pRes = taosArrayGet(pMetaCache->pTableVgroupData, reqIndex); + if (TSDB_CODE_SUCCESS == pRes->code) { + memcpy(pVgroup, pRes->pRes, sizeof(SVgroupInfo)); + } + return pRes->code; +} + void destoryParseTablesMetaReqHash(SHashObj* pHash) { SParseTablesMetaReq* p = taosHashIterate(pHash, NULL); while (NULL != p) { @@ -994,6 +1159,16 @@ void destoryParseMetaCache(SParseMetaCache* pMetaCache, bool request) { taosHashCleanup(pMetaCache->pTableMeta); taosHashCleanup(pMetaCache->pTableVgroup); } + SInsertTablesMetaReq* p = taosHashIterate(pMetaCache->pInsertTables, NULL); + while (NULL != p) { + taosArrayDestroy(p->pTableMetaPos); + taosArrayDestroy(p->pTableMetaReq); + taosArrayDestroy(p->pTableVgroupPos); + taosArrayDestroy(p->pTableVgroupReq); + + p = taosHashIterate(pMetaCache->pInsertTables, p); + } + taosHashCleanup(pMetaCache->pInsertTables); taosHashCleanup(pMetaCache->pDbVgroup); taosHashCleanup(pMetaCache->pDbCfg); taosHashCleanup(pMetaCache->pDbInfo); diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c index 34cd783ace5c84608de6d62ae6b994c2fbb9e3c3..7ee6a5b2236b24a676214c3538ed182aa52f427a 100644 --- a/source/libs/parser/src/parser.c +++ b/source/libs/parser/src/parser.c @@ -136,8 +136,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) { } static EDealRes rewriteQueryExprAliasImpl(SNode* pNode, void* pContext) { - if (nodesIsExprNode(pNode) && QUERY_NODE_COLUMN != nodeType(pNode) && '\0' == ((SExprNode*)pNode)->userAlias[0]) { - strcpy(((SExprNode*)pNode)->userAlias, ((SExprNode*)pNode)->aliasName); + if (nodesIsExprNode(pNode) && QUERY_NODE_COLUMN != nodeType(pNode)) { sprintf(((SExprNode*)pNode)->aliasName, "#%d", *(int32_t*)pContext); ++(*(int32_t*)pContext); } @@ -185,7 +184,7 @@ int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq code = parseSqlSyntax(pCxt, pQuery, &metaCache); } if (TSDB_CODE_SUCCESS == code) { - code = buildCatalogReq(&metaCache, pCatalogReq); + code = buildCatalogReq(pCxt, &metaCache, pCatalogReq); } destoryParseMetaCache(&metaCache, true); terrno = code; @@ -195,7 +194,7 @@ int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq, const struct SMetaData* pMetaData, SQuery* pQuery) { SParseMetaCache metaCache = {0}; - int32_t code = putMetaDataToCache(pCatalogReq, pMetaData, &metaCache); + int32_t code = putMetaDataToCache(pCatalogReq, pMetaData, &metaCache, NULL == pQuery->pRoot); if (TSDB_CODE_SUCCESS == code) { if (NULL == pQuery->pRoot) { code = parseInsertSql(pCxt, &pQuery, &metaCache); diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c index c820e955d78dc9439499d21645c2456884edb318..c4bd1aff044a491edede232eff74b8dea1feeadb 100644 --- a/source/libs/parser/src/sql.c +++ b/source/libs/parser/src/sql.c @@ -104,26 +104,26 @@ #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 427 +#define YYNOCODE 426 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SToken typedef union { int yyinit; ParseTOKENTYPE yy0; - SAlterOption yy95; - EOperatorType yy198; - EOrder yy204; - int8_t yy215; - ENullOrder yy277; - bool yy313; - int64_t yy473; - SNodeList* yy544; - SToken yy617; - EJoinType yy708; - SDataType yy784; - EFillMode yy816; - SNode* yy840; - int32_t yy844; + SAlterOption yy5; + int8_t yy59; + int64_t yy69; + EJoinType yy156; + SNodeList* yy172; + EFillMode yy186; + SToken yy209; + int32_t yy232; + SNode* yy272; + bool yy293; + EOperatorType yy392; + ENullOrder yy493; + SDataType yy616; + EOrder yy818; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -140,16 +140,16 @@ typedef union { #define ParseCTX_STORE #define YYFALLBACK 1 #define YYNSTATE 667 -#define YYNRULE 491 +#define YYNRULE 489 #define YYNTOKEN 305 #define YY_MAX_SHIFT 666 -#define YY_MIN_SHIFTREDUCE 973 -#define YY_MAX_SHIFTREDUCE 1463 -#define YY_ERROR_ACTION 1464 -#define YY_ACCEPT_ACTION 1465 -#define YY_NO_ACTION 1466 -#define YY_MIN_REDUCE 1467 -#define YY_MAX_REDUCE 1957 +#define YY_MIN_SHIFTREDUCE 972 +#define YY_MAX_SHIFTREDUCE 1460 +#define YY_ERROR_ACTION 1461 +#define YY_ACCEPT_ACTION 1462 +#define YY_NO_ACTION 1463 +#define YY_MIN_REDUCE 1464 +#define YY_MAX_REDUCE 1952 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -216,694 +216,650 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (2548) +#define YY_ACTTAB_COUNT (2259) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 526, 30, 261, 526, 549, 433, 526, 434, 1502, 11, - /* 10 */ 10, 117, 39, 37, 55, 1653, 1654, 117, 471, 378, - /* 20 */ 339, 1468, 1264, 1006, 476, 1023, 1290, 1022, 1607, 1791, - /* 30 */ 1598, 1607, 127, 1340, 1607, 1262, 441, 552, 434, 1502, - /* 40 */ 469, 1775, 107, 1779, 1290, 106, 105, 104, 103, 102, - /* 50 */ 101, 100, 99, 98, 1775, 1024, 1335, 1809, 150, 64, - /* 60 */ 1935, 14, 1567, 1010, 1011, 553, 1771, 1777, 1270, 450, - /* 70 */ 1761, 125, 577, 165, 39, 37, 1403, 1932, 571, 1771, - /* 80 */ 1777, 328, 339, 1529, 1264, 551, 161, 1877, 1878, 1, - /* 90 */ 1882, 571, 1659, 479, 478, 1340, 1823, 1262, 1376, 327, - /* 100 */ 95, 1792, 580, 1794, 1795, 576, 496, 571, 1657, 344, - /* 110 */ 1869, 663, 1652, 1654, 330, 1865, 160, 513, 1335, 494, - /* 120 */ 1935, 492, 1289, 14, 325, 1342, 1343, 1705, 164, 543, - /* 130 */ 1270, 1161, 1162, 1934, 33, 32, 1895, 1932, 40, 38, - /* 140 */ 36, 35, 34, 148, 63, 1479, 640, 639, 638, 637, + /* 0 */ 433, 1930, 434, 1499, 1593, 441, 526, 434, 1499, 513, + /* 10 */ 30, 260, 39, 37, 1929, 326, 325, 117, 1927, 1702, + /* 20 */ 339, 1465, 1261, 146, 471, 40, 38, 36, 35, 34, + /* 30 */ 1786, 552, 1606, 1337, 1604, 1259, 344, 552, 1287, 1649, + /* 40 */ 1651, 378, 107, 1774, 526, 106, 105, 104, 103, 102, + /* 50 */ 101, 100, 99, 98, 1770, 117, 1332, 432, 1804, 64, + /* 60 */ 436, 14, 476, 36, 35, 34, 553, 148, 1267, 1476, + /* 70 */ 450, 1756, 1604, 577, 39, 37, 1400, 1595, 1766, 1772, + /* 80 */ 328, 1930, 339, 1526, 1261, 1804, 217, 1286, 1770, 1, + /* 90 */ 571, 1005, 1656, 542, 164, 1337, 1818, 1259, 1927, 327, + /* 100 */ 95, 1787, 580, 1789, 1790, 576, 43, 571, 1654, 158, + /* 110 */ 1864, 663, 1766, 1772, 330, 1860, 159, 513, 1332, 63, + /* 120 */ 1930, 78, 1643, 14, 571, 1339, 1340, 1703, 163, 541, + /* 130 */ 1267, 1009, 1010, 165, 33, 32, 1890, 1927, 40, 38, + /* 140 */ 36, 35, 34, 543, 63, 63, 640, 639, 638, 637, /* 150 */ 349, 2, 636, 635, 128, 630, 629, 628, 627, 626, /* 160 */ 625, 624, 139, 620, 619, 618, 348, 347, 615, 614, - /* 170 */ 1265, 107, 1263, 663, 106, 105, 104, 103, 102, 101, - /* 180 */ 100, 99, 98, 1809, 36, 35, 34, 1342, 1343, 224, - /* 190 */ 225, 542, 384, 1268, 1269, 613, 1317, 1318, 1320, 1321, - /* 200 */ 1322, 1323, 1324, 1325, 573, 569, 1333, 1334, 1336, 1337, - /* 210 */ 1338, 1339, 1341, 1344, 1467, 1288, 1434, 33, 32, 482, - /* 220 */ 481, 40, 38, 36, 35, 34, 123, 168, 541, 303, - /* 230 */ 1465, 223, 1265, 84, 1263, 1264, 477, 480, 116, 115, - /* 240 */ 114, 113, 112, 111, 110, 109, 108, 305, 1262, 1023, - /* 250 */ 516, 1022, 22, 174, 1600, 1268, 1269, 1490, 1317, 1318, - /* 260 */ 1320, 1321, 1322, 1323, 1324, 1325, 573, 569, 1333, 1334, - /* 270 */ 1336, 1337, 1338, 1339, 1341, 1344, 39, 37, 1489, 1024, - /* 280 */ 538, 1270, 168, 526, 339, 71, 1264, 1488, 70, 354, - /* 290 */ 1244, 1245, 1708, 1791, 170, 211, 512, 1340, 1761, 1262, - /* 300 */ 1119, 602, 601, 600, 1123, 599, 1125, 1126, 598, 1128, - /* 310 */ 595, 1607, 1134, 592, 1136, 1137, 589, 586, 1935, 1761, - /* 320 */ 1335, 1809, 1584, 1270, 663, 14, 1659, 1935, 1761, 553, - /* 330 */ 1935, 166, 1270, 343, 1761, 1932, 577, 1935, 39, 37, - /* 340 */ 1933, 487, 1657, 165, 1932, 552, 339, 1932, 1264, 549, - /* 350 */ 165, 76, 305, 2, 1932, 516, 497, 544, 539, 1340, - /* 360 */ 1823, 1262, 1698, 159, 95, 1792, 580, 1794, 1795, 576, - /* 370 */ 210, 571, 63, 173, 1869, 663, 1646, 127, 330, 1865, - /* 380 */ 160, 552, 1335, 1265, 490, 1263, 419, 605, 484, 1342, - /* 390 */ 1343, 33, 32, 209, 1270, 40, 38, 36, 35, 34, - /* 400 */ 1896, 634, 632, 39, 37, 1345, 1268, 1269, 1487, 91, - /* 410 */ 622, 339, 1791, 1264, 42, 8, 125, 40, 38, 36, - /* 420 */ 35, 34, 124, 611, 1340, 58, 1262, 1596, 57, 49, - /* 430 */ 1599, 162, 1877, 1878, 1265, 1882, 1263, 663, 178, 177, - /* 440 */ 1809, 352, 137, 136, 608, 607, 606, 1335, 575, 1761, - /* 450 */ 43, 1342, 1343, 1761, 316, 577, 1486, 1268, 1269, 1270, - /* 460 */ 1317, 1318, 1320, 1321, 1322, 1323, 1324, 1325, 573, 569, - /* 470 */ 1333, 1334, 1336, 1337, 1338, 1339, 1341, 1344, 63, 1823, - /* 480 */ 9, 74, 1935, 294, 1792, 580, 1794, 1795, 576, 574, - /* 490 */ 571, 568, 1841, 1289, 122, 165, 1265, 1761, 1263, 1932, - /* 500 */ 33, 32, 663, 1602, 40, 38, 36, 35, 34, 317, - /* 510 */ 168, 315, 314, 1485, 473, 351, 1342, 1343, 475, 1268, - /* 520 */ 1269, 1291, 1317, 1318, 1320, 1321, 1322, 1323, 1324, 1325, - /* 530 */ 573, 569, 1333, 1334, 1336, 1337, 1338, 1339, 1341, 1344, - /* 540 */ 474, 1010, 1011, 33, 32, 1460, 1364, 40, 38, 36, - /* 550 */ 35, 34, 168, 168, 1761, 526, 1935, 1592, 377, 146, - /* 560 */ 376, 1265, 63, 1263, 26, 1532, 382, 168, 1610, 165, - /* 570 */ 33, 32, 217, 1932, 40, 38, 36, 35, 34, 218, - /* 580 */ 1484, 1791, 1414, 1607, 1268, 1269, 1594, 1317, 1318, 1320, - /* 590 */ 1321, 1322, 1323, 1324, 1325, 573, 569, 1333, 1334, 1336, - /* 600 */ 1337, 1338, 1339, 1341, 1344, 39, 37, 77, 27, 1809, - /* 610 */ 498, 1884, 63, 339, 78, 1264, 168, 578, 1369, 1483, - /* 620 */ 505, 1761, 1761, 373, 577, 1302, 1340, 28, 1262, 482, - /* 630 */ 481, 1482, 1459, 33, 32, 1881, 123, 40, 38, 36, - /* 640 */ 35, 34, 375, 371, 438, 1590, 477, 480, 1823, 1335, - /* 650 */ 1287, 1935, 96, 1792, 580, 1794, 1795, 576, 253, 571, - /* 660 */ 1761, 1270, 1869, 513, 165, 1481, 1868, 1865, 1932, 1081, - /* 670 */ 33, 32, 1761, 1706, 40, 38, 36, 35, 34, 666, - /* 680 */ 33, 32, 9, 526, 40, 38, 36, 35, 34, 1478, - /* 690 */ 1477, 33, 32, 268, 383, 40, 38, 36, 35, 34, - /* 700 */ 168, 1704, 1083, 300, 663, 432, 1761, 157, 436, 1698, - /* 710 */ 214, 1607, 656, 652, 648, 644, 266, 1582, 1342, 1343, - /* 720 */ 176, 33, 32, 307, 572, 40, 38, 36, 35, 34, - /* 730 */ 1761, 1761, 39, 37, 526, 604, 526, 302, 1476, 1287, - /* 740 */ 339, 549, 1264, 526, 307, 389, 412, 404, 92, 424, - /* 750 */ 168, 231, 1302, 1340, 405, 1262, 440, 1585, 74, 436, - /* 760 */ 1362, 1407, 1607, 1265, 1607, 1263, 397, 1289, 425, 127, - /* 770 */ 399, 1607, 1475, 1703, 1779, 300, 1335, 1889, 1396, 1761, - /* 780 */ 1603, 1362, 44, 4, 523, 1775, 1268, 1269, 1270, 1317, - /* 790 */ 1318, 1320, 1321, 1322, 1323, 1324, 1325, 573, 569, 1333, - /* 800 */ 1334, 1336, 1337, 1338, 1339, 1341, 1344, 390, 125, 2, - /* 810 */ 1771, 1777, 334, 1761, 1363, 7, 220, 450, 611, 386, - /* 820 */ 90, 526, 571, 163, 1877, 1878, 1659, 1882, 1424, 145, - /* 830 */ 87, 663, 448, 312, 1236, 1363, 213, 137, 136, 608, - /* 840 */ 607, 606, 1657, 1480, 1884, 1342, 1343, 423, 1474, 1607, + /* 170 */ 1262, 107, 1260, 663, 106, 105, 104, 103, 102, 101, + /* 180 */ 100, 99, 98, 440, 1775, 1287, 436, 1339, 1340, 223, + /* 190 */ 224, 11, 10, 1265, 1266, 1770, 1314, 1315, 1317, 1318, + /* 200 */ 1319, 1320, 1321, 1322, 573, 569, 1330, 1331, 1333, 1334, + /* 210 */ 1335, 1336, 1338, 1341, 1464, 496, 1431, 33, 32, 1766, + /* 220 */ 1772, 40, 38, 36, 35, 34, 526, 167, 494, 150, + /* 230 */ 492, 571, 1262, 1564, 1260, 1261, 210, 55, 116, 115, + /* 240 */ 114, 113, 112, 111, 110, 109, 108, 305, 1259, 63, + /* 250 */ 516, 1701, 22, 300, 1604, 1265, 1266, 167, 1314, 1315, + /* 260 */ 1317, 1318, 1319, 1320, 1321, 1322, 573, 569, 1330, 1331, + /* 270 */ 1333, 1334, 1335, 1336, 1338, 1341, 39, 37, 1650, 1651, + /* 280 */ 1373, 1267, 167, 167, 339, 552, 1261, 613, 1286, 49, + /* 290 */ 1160, 1161, 76, 305, 1786, 1487, 516, 1337, 1421, 1259, + /* 300 */ 1118, 602, 601, 600, 1122, 599, 1124, 1125, 598, 1127, + /* 310 */ 595, 342, 1133, 592, 1135, 1136, 589, 586, 1285, 146, + /* 320 */ 1332, 1581, 1804, 173, 663, 14, 479, 478, 1606, 377, + /* 330 */ 553, 376, 1267, 1656, 1930, 1756, 1756, 577, 39, 37, + /* 340 */ 312, 535, 1419, 1420, 1422, 1423, 339, 1928, 1261, 1654, + /* 350 */ 1705, 1927, 84, 2, 42, 71, 1656, 63, 70, 1337, + /* 360 */ 1818, 1259, 1267, 343, 95, 1787, 580, 1789, 1790, 576, + /* 370 */ 605, 571, 1654, 1597, 1864, 663, 345, 1589, 330, 1860, + /* 380 */ 159, 1288, 1332, 1262, 146, 1260, 1022, 167, 1021, 1339, + /* 390 */ 1340, 33, 32, 1606, 1267, 40, 38, 36, 35, 34, + /* 400 */ 1891, 1930, 384, 39, 37, 1342, 1265, 1266, 1486, 1485, + /* 410 */ 611, 339, 1786, 1261, 165, 8, 1023, 438, 1927, 634, + /* 420 */ 632, 1080, 611, 1284, 1337, 622, 1259, 167, 549, 137, + /* 430 */ 136, 608, 607, 606, 1262, 1695, 1260, 663, 1484, 303, + /* 440 */ 1804, 137, 136, 608, 607, 606, 172, 1332, 575, 1756, + /* 450 */ 1756, 1339, 1340, 1756, 1082, 577, 127, 1265, 1266, 1267, + /* 460 */ 1314, 1315, 1317, 1318, 1319, 1320, 1321, 1322, 573, 569, + /* 470 */ 1330, 1331, 1333, 1334, 1335, 1336, 1338, 1341, 1818, 1756, + /* 480 */ 9, 1591, 293, 1787, 580, 1789, 1790, 576, 574, 571, + /* 490 */ 568, 1836, 167, 74, 125, 167, 1262, 222, 1260, 33, + /* 500 */ 32, 1529, 663, 40, 38, 36, 35, 34, 551, 160, + /* 510 */ 1872, 1873, 1587, 1877, 1483, 1600, 1339, 1340, 252, 1265, + /* 520 */ 1266, 1579, 1314, 1315, 1317, 1318, 1319, 1320, 1321, 1322, + /* 530 */ 573, 569, 1330, 1331, 1333, 1334, 1335, 1336, 1338, 1341, + /* 540 */ 1700, 526, 300, 33, 32, 1457, 91, 40, 38, 36, + /* 550 */ 35, 34, 169, 167, 316, 1756, 1241, 1242, 623, 124, + /* 560 */ 1576, 1262, 1879, 1260, 26, 482, 481, 1596, 1462, 1604, + /* 570 */ 33, 32, 123, 1582, 40, 38, 36, 35, 34, 213, + /* 580 */ 1786, 1411, 477, 480, 1265, 1266, 1876, 1314, 1315, 1317, + /* 590 */ 1318, 1319, 1320, 1321, 1322, 573, 569, 1330, 1331, 1333, + /* 600 */ 1334, 1335, 1336, 1338, 1341, 39, 37, 475, 1804, 317, + /* 610 */ 146, 315, 314, 339, 473, 1261, 578, 1361, 475, 1607, + /* 620 */ 549, 1756, 611, 577, 28, 1299, 1337, 354, 1259, 474, + /* 630 */ 33, 32, 1456, 450, 40, 38, 36, 35, 34, 538, + /* 640 */ 474, 137, 136, 608, 607, 606, 1818, 1695, 127, 1332, + /* 650 */ 96, 1787, 580, 1789, 1790, 576, 572, 571, 175, 74, + /* 660 */ 1864, 1267, 526, 609, 1863, 1860, 1647, 1930, 554, 512, + /* 670 */ 33, 32, 122, 382, 40, 38, 36, 35, 34, 27, + /* 680 */ 164, 1599, 9, 1482, 1927, 1022, 125, 1021, 7, 1366, + /* 690 */ 1604, 33, 32, 1481, 1565, 40, 38, 36, 35, 34, + /* 700 */ 469, 250, 1872, 548, 663, 547, 33, 32, 1930, 1930, + /* 710 */ 40, 38, 36, 35, 34, 1023, 544, 539, 1339, 1340, + /* 720 */ 526, 166, 164, 307, 1756, 1927, 1927, 135, 487, 1404, + /* 730 */ 526, 383, 39, 37, 1756, 1286, 1480, 302, 1879, 1284, + /* 740 */ 339, 389, 1261, 497, 307, 526, 412, 604, 1604, 424, + /* 750 */ 526, 549, 1299, 1337, 1477, 1259, 404, 209, 1604, 61, + /* 760 */ 1359, 405, 1875, 1262, 373, 1260, 397, 1479, 425, 1478, + /* 770 */ 399, 490, 255, 1604, 54, 484, 1332, 1756, 1604, 127, + /* 780 */ 208, 1359, 1475, 375, 371, 419, 1265, 1266, 1267, 1314, + /* 790 */ 1315, 1317, 1318, 1319, 1320, 1321, 1322, 573, 569, 1330, + /* 800 */ 1331, 1333, 1334, 1335, 1336, 1338, 1341, 390, 1756, 2, + /* 810 */ 1756, 1397, 58, 526, 1360, 57, 1879, 125, 505, 386, + /* 820 */ 1289, 33, 32, 1756, 448, 40, 38, 36, 35, 34, + /* 830 */ 1474, 663, 161, 1872, 1873, 1360, 1877, 177, 176, 1505, + /* 840 */ 1874, 1604, 1347, 1009, 1010, 1339, 1340, 423, 1286, 1580, /* 850 */ 418, 417, 416, 415, 414, 411, 410, 409, 408, 407, - /* 860 */ 403, 402, 401, 400, 394, 393, 392, 391, 1880, 388, - /* 870 */ 387, 535, 1422, 1423, 1425, 1426, 29, 337, 1357, 1358, - /* 880 */ 1359, 1360, 1361, 1365, 1366, 1367, 1368, 1350, 61, 1761, - /* 890 */ 1265, 609, 1263, 1289, 1650, 1935, 1400, 29, 337, 1357, - /* 900 */ 1358, 1359, 1360, 1361, 1365, 1366, 1367, 1368, 166, 1583, - /* 910 */ 1791, 1473, 1932, 1268, 1269, 1472, 1317, 1318, 1320, 1321, - /* 920 */ 1322, 1323, 1324, 1325, 573, 569, 1333, 1334, 1336, 1337, - /* 930 */ 1338, 1339, 1341, 1344, 623, 147, 1579, 1791, 1809, 526, - /* 940 */ 279, 611, 610, 256, 1319, 1650, 578, 1884, 1471, 1470, - /* 950 */ 449, 1761, 1761, 577, 277, 60, 1761, 475, 59, 1292, - /* 960 */ 137, 136, 608, 607, 606, 1809, 554, 1607, 1289, 613, - /* 970 */ 1568, 1879, 135, 578, 181, 429, 427, 1823, 1761, 474, - /* 980 */ 577, 94, 1792, 580, 1794, 1795, 576, 536, 571, 1761, - /* 990 */ 1761, 1869, 1780, 554, 468, 306, 1865, 273, 53, 509, - /* 1000 */ 1637, 1659, 1396, 1775, 1823, 526, 63, 1935, 94, 1792, - /* 1010 */ 580, 1794, 1795, 576, 526, 571, 1604, 1658, 1869, 54, - /* 1020 */ 167, 1748, 306, 1865, 1932, 1736, 1519, 202, 1771, 1777, - /* 1030 */ 200, 336, 335, 1607, 1935, 1462, 1463, 558, 526, 526, - /* 1040 */ 571, 1278, 1607, 1273, 93, 526, 526, 165, 483, 506, - /* 1050 */ 510, 1932, 1340, 561, 1271, 326, 228, 522, 526, 204, - /* 1060 */ 526, 1791, 203, 146, 499, 526, 1607, 1607, 361, 524, - /* 1070 */ 1319, 525, 1609, 1607, 1607, 1335, 262, 41, 222, 68, - /* 1080 */ 67, 381, 342, 526, 172, 1272, 1607, 1270, 1607, 1809, - /* 1090 */ 146, 131, 245, 1607, 346, 206, 233, 578, 205, 1609, - /* 1100 */ 301, 567, 1761, 369, 577, 367, 363, 359, 356, 353, - /* 1110 */ 345, 1607, 1782, 208, 134, 135, 207, 1810, 146, 1514, - /* 1120 */ 1399, 1512, 51, 1791, 1213, 226, 237, 1609, 1823, 556, - /* 1130 */ 566, 51, 95, 1792, 580, 1794, 1795, 576, 519, 571, - /* 1140 */ 41, 485, 1869, 488, 168, 1319, 330, 1865, 1948, 11, - /* 1150 */ 10, 1809, 616, 41, 617, 1784, 350, 1903, 584, 578, - /* 1160 */ 134, 230, 1112, 1503, 1761, 1647, 577, 135, 119, 1421, - /* 1170 */ 134, 1899, 550, 240, 1069, 1791, 1067, 255, 1370, 250, - /* 1180 */ 1276, 258, 260, 3, 5, 355, 313, 1326, 1050, 1279, - /* 1190 */ 1823, 1274, 360, 1229, 95, 1792, 580, 1794, 1795, 576, - /* 1200 */ 272, 571, 269, 1809, 1869, 1140, 1508, 1144, 330, 1865, - /* 1210 */ 1948, 578, 1282, 1284, 1151, 1149, 1761, 138, 577, 1926, - /* 1220 */ 175, 1051, 1275, 1287, 569, 1333, 1334, 1336, 1337, 1338, - /* 1230 */ 1339, 1791, 385, 1354, 406, 1700, 413, 421, 420, 1293, - /* 1240 */ 559, 1791, 1823, 422, 426, 431, 95, 1792, 580, 1794, - /* 1250 */ 1795, 576, 428, 571, 658, 439, 1869, 430, 562, 1809, - /* 1260 */ 330, 1865, 1948, 1295, 442, 443, 184, 578, 1294, 1809, - /* 1270 */ 186, 1888, 1761, 1296, 577, 444, 445, 578, 189, 447, - /* 1280 */ 191, 72, 1761, 73, 577, 451, 470, 554, 195, 472, - /* 1290 */ 1791, 304, 1597, 199, 118, 1593, 1741, 554, 1823, 501, - /* 1300 */ 201, 140, 286, 1792, 580, 1794, 1795, 576, 1823, 571, - /* 1310 */ 141, 1595, 286, 1792, 580, 1794, 1795, 576, 1809, 571, - /* 1320 */ 1591, 142, 143, 212, 270, 500, 578, 215, 1935, 507, - /* 1330 */ 504, 1761, 511, 577, 322, 219, 534, 514, 1935, 132, - /* 1340 */ 1740, 167, 1710, 520, 517, 1932, 133, 324, 81, 521, - /* 1350 */ 1791, 165, 1292, 530, 271, 1932, 83, 1823, 1608, 235, - /* 1360 */ 1791, 96, 1792, 580, 1794, 1795, 576, 1900, 571, 537, - /* 1370 */ 239, 1869, 532, 1910, 6, 565, 1865, 533, 1809, 546, - /* 1380 */ 329, 1909, 540, 531, 529, 244, 578, 1891, 1809, 528, - /* 1390 */ 1396, 1761, 1291, 577, 154, 126, 578, 249, 563, 560, - /* 1400 */ 246, 1761, 48, 577, 1885, 247, 331, 248, 85, 1791, - /* 1410 */ 582, 1651, 1580, 265, 274, 659, 660, 1823, 1931, 662, - /* 1420 */ 52, 149, 1792, 580, 1794, 1795, 576, 1823, 571, 1951, - /* 1430 */ 153, 96, 1792, 580, 1794, 1795, 576, 1809, 571, 557, - /* 1440 */ 1755, 1869, 323, 287, 297, 578, 1866, 1850, 296, 254, - /* 1450 */ 1761, 276, 577, 564, 1754, 278, 257, 259, 65, 1753, - /* 1460 */ 1791, 1752, 66, 1749, 357, 555, 1949, 358, 1256, 1257, - /* 1470 */ 171, 362, 1747, 364, 365, 366, 1823, 1746, 1745, 368, - /* 1480 */ 295, 1792, 580, 1794, 1795, 576, 370, 571, 1809, 1744, - /* 1490 */ 372, 1743, 374, 527, 1232, 1231, 578, 1721, 1720, 379, - /* 1500 */ 380, 1761, 1201, 577, 1719, 1718, 1693, 129, 1692, 1691, - /* 1510 */ 1690, 69, 1791, 1689, 1688, 1687, 1686, 1685, 395, 396, - /* 1520 */ 1684, 398, 1791, 130, 1669, 1668, 1667, 1823, 1683, 1682, - /* 1530 */ 1681, 295, 1792, 580, 1794, 1795, 576, 1680, 571, 1791, - /* 1540 */ 1809, 1679, 1678, 1677, 1676, 1675, 1674, 1673, 578, 1672, - /* 1550 */ 1809, 1671, 1670, 1761, 1666, 577, 1665, 1664, 578, 1663, - /* 1560 */ 1203, 1662, 1661, 1761, 1660, 577, 1534, 1809, 179, 1533, - /* 1570 */ 1531, 1499, 120, 182, 180, 575, 1498, 158, 435, 1823, - /* 1580 */ 1761, 1013, 577, 290, 1792, 580, 1794, 1795, 576, 1823, - /* 1590 */ 571, 190, 1012, 149, 1792, 580, 1794, 1795, 576, 1791, - /* 1600 */ 571, 437, 1734, 183, 121, 1728, 1823, 1717, 1716, 1702, - /* 1610 */ 294, 1792, 580, 1794, 1795, 576, 1791, 571, 188, 1842, - /* 1620 */ 1586, 545, 1043, 1530, 1528, 452, 454, 1809, 1526, 453, - /* 1630 */ 456, 457, 338, 458, 1524, 578, 460, 462, 1950, 461, - /* 1640 */ 1761, 1522, 577, 465, 1809, 464, 1511, 1510, 1495, 340, - /* 1650 */ 466, 1588, 578, 1155, 1154, 1587, 50, 1761, 631, 577, - /* 1660 */ 1080, 1077, 633, 1520, 198, 1076, 1823, 1075, 1515, 1513, - /* 1670 */ 295, 1792, 580, 1794, 1795, 576, 318, 571, 319, 320, - /* 1680 */ 486, 1494, 1493, 1823, 1791, 489, 197, 295, 1792, 580, - /* 1690 */ 1794, 1795, 576, 491, 571, 1492, 493, 495, 97, 1733, - /* 1700 */ 152, 1238, 1791, 1727, 216, 467, 463, 459, 455, 196, - /* 1710 */ 56, 502, 1809, 144, 1715, 1713, 1714, 1712, 1711, 221, - /* 1720 */ 578, 1248, 15, 1709, 227, 1761, 79, 577, 1701, 503, - /* 1730 */ 1809, 321, 508, 80, 232, 518, 41, 87, 578, 229, - /* 1740 */ 47, 75, 16, 1761, 194, 577, 243, 242, 82, 25, - /* 1750 */ 17, 1823, 1436, 23, 234, 280, 1792, 580, 1794, 1795, - /* 1760 */ 576, 1791, 571, 236, 1418, 515, 238, 1782, 151, 1823, - /* 1770 */ 1420, 252, 241, 281, 1792, 580, 1794, 1795, 576, 24, - /* 1780 */ 571, 1413, 1393, 46, 1781, 86, 18, 155, 1392, 1809, - /* 1790 */ 1448, 1453, 1442, 1447, 332, 1452, 1451, 578, 333, 10, - /* 1800 */ 45, 1280, 1761, 1330, 577, 1355, 193, 187, 13, 192, - /* 1810 */ 1791, 19, 1328, 446, 1327, 156, 1826, 169, 570, 31, - /* 1820 */ 12, 20, 1310, 21, 583, 1141, 341, 1138, 1823, 185, - /* 1830 */ 587, 1791, 282, 1792, 580, 1794, 1795, 576, 1809, 571, - /* 1840 */ 585, 588, 581, 1135, 579, 590, 578, 1129, 593, 596, - /* 1850 */ 1118, 1761, 1127, 577, 591, 594, 597, 1133, 1132, 1809, - /* 1860 */ 1131, 1130, 88, 89, 263, 603, 1150, 578, 1146, 62, - /* 1870 */ 1041, 1072, 1761, 612, 577, 1071, 1070, 1823, 1068, 1066, - /* 1880 */ 1065, 289, 1792, 580, 1794, 1795, 576, 1064, 571, 1791, - /* 1890 */ 1087, 621, 264, 1062, 1061, 1060, 1059, 1058, 1823, 1791, - /* 1900 */ 1057, 1056, 291, 1792, 580, 1794, 1795, 576, 1047, 571, - /* 1910 */ 1084, 1082, 1053, 1052, 1049, 1048, 1046, 1809, 1527, 641, - /* 1920 */ 1525, 642, 643, 645, 647, 578, 1523, 1809, 649, 646, - /* 1930 */ 1761, 651, 577, 1521, 650, 578, 653, 655, 654, 1509, - /* 1940 */ 1761, 657, 577, 1491, 1003, 267, 661, 1466, 1466, 1266, - /* 1950 */ 275, 1791, 664, 1466, 665, 1466, 1823, 1466, 1466, 1466, - /* 1960 */ 283, 1792, 580, 1794, 1795, 576, 1823, 571, 1791, 1466, - /* 1970 */ 292, 1792, 580, 1794, 1795, 576, 1466, 571, 1466, 1809, - /* 1980 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 578, 1466, 1466, - /* 1990 */ 1466, 1466, 1761, 1466, 577, 1466, 1809, 1466, 1466, 1466, - /* 2000 */ 1466, 1466, 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761, - /* 2010 */ 1466, 577, 1466, 1466, 1466, 1466, 1466, 1791, 1823, 1466, - /* 2020 */ 1466, 1466, 284, 1792, 580, 1794, 1795, 576, 1466, 571, - /* 2030 */ 1466, 1466, 1466, 1466, 1791, 1823, 1466, 1466, 1466, 293, - /* 2040 */ 1792, 580, 1794, 1795, 576, 1809, 571, 1466, 1466, 1466, - /* 2050 */ 1466, 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761, 1466, - /* 2060 */ 577, 1466, 1809, 1466, 1466, 1466, 1466, 1466, 1466, 1466, - /* 2070 */ 578, 1466, 1466, 1466, 1466, 1761, 1466, 577, 1466, 1466, - /* 2080 */ 1466, 1466, 1466, 1791, 1823, 1466, 1466, 1466, 285, 1792, - /* 2090 */ 580, 1794, 1795, 576, 1466, 571, 1466, 1466, 1466, 1466, - /* 2100 */ 1466, 1823, 1466, 1466, 1466, 298, 1792, 580, 1794, 1795, - /* 2110 */ 576, 1809, 571, 1466, 1466, 1466, 1466, 1466, 1466, 578, - /* 2120 */ 1466, 1466, 1466, 1466, 1761, 1466, 577, 1466, 1466, 1466, - /* 2130 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1791, 1466, 1466, - /* 2140 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1791, 1466, 1466, - /* 2150 */ 1823, 1466, 1466, 1466, 299, 1792, 580, 1794, 1795, 576, - /* 2160 */ 1466, 571, 1466, 1466, 1466, 1809, 1466, 1466, 1466, 1466, - /* 2170 */ 1466, 1466, 1466, 578, 1466, 1809, 1466, 1466, 1761, 1466, - /* 2180 */ 577, 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761, 1466, - /* 2190 */ 577, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1791, 1466, - /* 2200 */ 1466, 1466, 1466, 1466, 1823, 1466, 1466, 1466, 1803, 1792, - /* 2210 */ 580, 1794, 1795, 576, 1823, 571, 1791, 1466, 1802, 1792, - /* 2220 */ 580, 1794, 1795, 576, 1466, 571, 1809, 1466, 1466, 1466, - /* 2230 */ 1466, 1466, 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761, - /* 2240 */ 1466, 577, 1466, 1466, 1809, 1466, 1466, 1466, 1466, 1466, - /* 2250 */ 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761, 1466, 577, - /* 2260 */ 1466, 1466, 1466, 1466, 1466, 1823, 1466, 1466, 1466, 1801, - /* 2270 */ 1792, 580, 1794, 1795, 576, 1791, 571, 1466, 1466, 1466, - /* 2280 */ 1466, 1466, 1466, 1823, 1466, 1466, 1466, 310, 1792, 580, - /* 2290 */ 1794, 1795, 576, 1466, 571, 1466, 1791, 1466, 1466, 1466, - /* 2300 */ 1466, 1466, 1466, 1809, 1466, 1466, 1466, 1466, 1466, 1466, - /* 2310 */ 1466, 578, 1466, 1466, 1466, 1466, 1761, 1466, 577, 1466, - /* 2320 */ 1466, 1466, 1466, 1466, 1809, 1466, 1466, 1466, 1466, 1466, - /* 2330 */ 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761, 1466, 577, - /* 2340 */ 1466, 1466, 1823, 1466, 1466, 1466, 309, 1792, 580, 1794, - /* 2350 */ 1795, 576, 1791, 571, 1466, 1466, 1466, 1466, 1466, 1466, - /* 2360 */ 1466, 1466, 1791, 1823, 1466, 1466, 1466, 311, 1792, 580, - /* 2370 */ 1794, 1795, 576, 1466, 571, 1466, 1466, 1466, 1466, 1466, - /* 2380 */ 1809, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 578, 1466, - /* 2390 */ 1809, 1466, 1466, 1761, 549, 577, 1466, 1466, 578, 1466, - /* 2400 */ 1466, 1466, 1466, 1761, 1466, 577, 1466, 1466, 1466, 1466, - /* 2410 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1823, - /* 2420 */ 1466, 1466, 127, 308, 1792, 580, 1794, 1795, 576, 1823, - /* 2430 */ 571, 1466, 1466, 288, 1792, 580, 1794, 1795, 576, 1466, - /* 2440 */ 571, 549, 554, 1466, 1466, 1466, 1466, 1466, 1466, 1466, - /* 2450 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, - /* 2460 */ 1466, 125, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 127, - /* 2470 */ 1466, 1466, 1466, 1466, 1466, 1466, 251, 1877, 548, 1466, - /* 2480 */ 547, 1466, 1466, 1935, 1466, 1466, 1466, 1466, 1466, 554, - /* 2490 */ 1466, 1466, 1466, 1466, 1466, 1466, 167, 1466, 1466, 1466, - /* 2500 */ 1932, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 125, 1466, - /* 2510 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, - /* 2520 */ 1466, 1466, 1466, 251, 1877, 548, 1466, 547, 1466, 1466, - /* 2530 */ 1935, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, - /* 2540 */ 1466, 1466, 1466, 165, 1466, 1466, 1466, 1932, + /* 860 */ 403, 402, 401, 400, 394, 393, 392, 391, 549, 388, + /* 870 */ 387, 1756, 1656, 616, 1473, 1393, 29, 337, 1354, 1355, + /* 880 */ 1356, 1357, 1358, 1362, 1363, 1364, 1365, 658, 1655, 468, + /* 890 */ 1262, 610, 1260, 1286, 1647, 1068, 127, 29, 337, 1354, + /* 900 */ 1355, 1356, 1357, 1358, 1362, 1363, 1364, 1365, 272, 613, + /* 910 */ 536, 1634, 1316, 1265, 1266, 1756, 1314, 1315, 1317, 1318, + /* 920 */ 1319, 1320, 1321, 1322, 573, 569, 1330, 1331, 1333, 1334, + /* 930 */ 1335, 1336, 1338, 1341, 125, 147, 1472, 1786, 561, 352, + /* 940 */ 278, 351, 1884, 1393, 1743, 482, 481, 1516, 1774, 162, + /* 950 */ 1872, 1873, 123, 1877, 276, 60, 1805, 232, 59, 1770, + /* 960 */ 526, 526, 477, 480, 1471, 1804, 44, 4, 244, 483, + /* 970 */ 145, 449, 1601, 578, 180, 429, 427, 1756, 1756, 1930, + /* 980 */ 577, 1930, 1500, 1766, 1772, 334, 1470, 1786, 1604, 1604, + /* 990 */ 526, 361, 164, 554, 164, 571, 1927, 216, 1927, 498, + /* 1000 */ 556, 499, 1469, 1818, 1468, 1756, 63, 94, 1787, 580, + /* 1010 */ 1789, 1790, 576, 526, 571, 1804, 558, 1864, 1604, 336, + /* 1020 */ 335, 306, 1860, 578, 506, 1316, 526, 1756, 1756, 1275, + /* 1030 */ 577, 201, 77, 1930, 199, 1396, 1644, 510, 526, 1930, + /* 1040 */ 1337, 1604, 1268, 1756, 93, 1756, 166, 1511, 1467, 227, + /* 1050 */ 1927, 350, 164, 1818, 1604, 1786, 1927, 95, 1787, 580, + /* 1060 */ 1789, 1790, 576, 1332, 571, 526, 1604, 1864, 41, 485, + /* 1070 */ 1316, 330, 1860, 1943, 526, 1267, 522, 53, 509, 68, + /* 1080 */ 67, 381, 1898, 1804, 171, 524, 221, 203, 526, 1756, + /* 1090 */ 202, 578, 205, 1604, 207, 204, 1756, 206, 577, 525, + /* 1100 */ 301, 1509, 1604, 369, 1894, 367, 363, 359, 356, 353, + /* 1110 */ 1270, 554, 131, 526, 1786, 1212, 1604, 1269, 566, 567, + /* 1120 */ 526, 1818, 134, 488, 261, 94, 1787, 580, 1789, 1790, + /* 1130 */ 576, 346, 571, 225, 135, 1864, 51, 550, 666, 306, + /* 1140 */ 1860, 1604, 1804, 562, 167, 236, 51, 323, 1604, 41, + /* 1150 */ 578, 1930, 267, 90, 1786, 1756, 617, 577, 41, 519, + /* 1160 */ 1777, 11, 10, 87, 164, 249, 156, 3, 1927, 229, + /* 1170 */ 254, 656, 652, 648, 644, 265, 584, 1276, 1066, 1271, + /* 1180 */ 1818, 1111, 1804, 1418, 294, 1787, 580, 1789, 1790, 576, + /* 1190 */ 578, 571, 239, 1367, 1786, 1756, 1323, 577, 1459, 1460, + /* 1200 */ 1279, 1281, 257, 1779, 259, 271, 134, 92, 135, 5, + /* 1210 */ 230, 1049, 569, 1330, 1331, 1333, 1334, 1335, 1336, 559, + /* 1220 */ 1818, 360, 1804, 1139, 95, 1787, 580, 1789, 1790, 576, + /* 1230 */ 578, 571, 268, 355, 1864, 1756, 119, 577, 330, 1860, + /* 1240 */ 1943, 134, 549, 523, 1050, 313, 1228, 1273, 174, 1921, + /* 1250 */ 385, 1351, 1284, 1143, 1272, 1150, 406, 413, 1697, 421, + /* 1260 */ 1818, 420, 422, 1786, 95, 1787, 580, 1789, 1790, 576, + /* 1270 */ 127, 571, 426, 428, 1864, 219, 430, 1290, 330, 1860, + /* 1280 */ 1943, 431, 439, 1148, 1292, 442, 183, 443, 138, 1883, + /* 1290 */ 554, 1804, 1291, 1235, 185, 212, 444, 1293, 188, 578, + /* 1300 */ 445, 190, 447, 72, 1756, 73, 577, 451, 125, 194, + /* 1310 */ 470, 472, 1594, 198, 118, 1590, 304, 1786, 200, 554, + /* 1320 */ 140, 269, 141, 250, 1872, 548, 1592, 547, 1588, 1818, + /* 1330 */ 1930, 142, 143, 285, 1787, 580, 1789, 1790, 576, 211, + /* 1340 */ 571, 500, 1736, 164, 214, 1804, 507, 1927, 504, 511, + /* 1350 */ 218, 322, 534, 578, 514, 520, 501, 1735, 1756, 1930, + /* 1360 */ 577, 132, 1707, 517, 324, 1289, 81, 1786, 521, 133, + /* 1370 */ 270, 83, 166, 554, 537, 1605, 1927, 530, 1905, 234, + /* 1380 */ 1895, 238, 6, 1818, 1786, 532, 533, 285, 1787, 580, + /* 1390 */ 1789, 1790, 576, 329, 571, 1804, 546, 531, 540, 529, + /* 1400 */ 528, 248, 1288, 578, 1393, 126, 563, 560, 1756, 48, + /* 1410 */ 577, 1880, 1804, 1930, 1904, 85, 1648, 331, 1577, 659, + /* 1420 */ 578, 582, 264, 660, 243, 1756, 164, 577, 153, 1886, + /* 1430 */ 1927, 247, 245, 1818, 1786, 246, 253, 96, 1787, 580, + /* 1440 */ 1789, 1790, 576, 1845, 571, 273, 662, 1864, 299, 275, + /* 1450 */ 1818, 565, 1860, 256, 149, 1787, 580, 1789, 1790, 576, + /* 1460 */ 1786, 571, 1804, 52, 1946, 1926, 557, 286, 296, 258, + /* 1470 */ 578, 564, 295, 1750, 277, 1756, 1749, 577, 65, 1748, + /* 1480 */ 1747, 66, 1744, 357, 358, 1253, 1254, 170, 1804, 362, + /* 1490 */ 1742, 364, 365, 527, 366, 1741, 578, 368, 555, 1944, + /* 1500 */ 1818, 1756, 1740, 577, 96, 1787, 580, 1789, 1790, 576, + /* 1510 */ 1786, 571, 370, 1739, 1864, 372, 1738, 1230, 374, 1861, + /* 1520 */ 1231, 1718, 1786, 379, 380, 1716, 1818, 1717, 1715, 1690, + /* 1530 */ 294, 1787, 580, 1789, 1790, 576, 1786, 571, 1804, 1689, + /* 1540 */ 1200, 129, 1688, 1687, 69, 1686, 578, 395, 1681, 396, + /* 1550 */ 1804, 1756, 1685, 577, 1684, 1683, 1682, 398, 578, 1680, + /* 1560 */ 1679, 1678, 1677, 1756, 1804, 577, 1676, 1675, 1674, 1673, + /* 1570 */ 1672, 1671, 575, 1670, 1669, 1668, 1818, 1756, 1667, 577, + /* 1580 */ 289, 1787, 580, 1789, 1790, 576, 130, 571, 1818, 1786, + /* 1590 */ 1666, 1665, 149, 1787, 580, 1789, 1790, 576, 1664, 571, + /* 1600 */ 1663, 1662, 1818, 1202, 1660, 1659, 293, 1787, 580, 1789, + /* 1610 */ 1790, 576, 1661, 571, 1658, 1837, 1657, 1804, 545, 1531, + /* 1620 */ 178, 1530, 338, 120, 181, 578, 196, 1528, 179, 1496, + /* 1630 */ 1756, 157, 577, 435, 1012, 437, 1011, 1945, 1495, 182, + /* 1640 */ 152, 121, 1786, 452, 453, 467, 463, 459, 455, 195, + /* 1650 */ 1731, 1725, 1714, 189, 1786, 1818, 187, 1713, 1699, 294, + /* 1660 */ 1787, 580, 1789, 1790, 576, 1583, 571, 1527, 1786, 1042, + /* 1670 */ 1804, 1525, 454, 1523, 456, 340, 458, 457, 578, 1521, + /* 1680 */ 460, 75, 1804, 1756, 193, 577, 462, 461, 1519, 464, + /* 1690 */ 578, 465, 466, 1508, 1507, 1756, 1804, 577, 1492, 1585, + /* 1700 */ 1153, 1154, 197, 1584, 578, 1079, 1074, 50, 1818, 1756, + /* 1710 */ 1517, 577, 294, 1787, 580, 1789, 1790, 576, 631, 571, + /* 1720 */ 1818, 1076, 1786, 633, 279, 1787, 580, 1789, 1790, 576, + /* 1730 */ 1075, 571, 1512, 318, 1818, 319, 1786, 1510, 280, 1787, + /* 1740 */ 580, 1789, 1790, 576, 320, 571, 192, 186, 1786, 191, + /* 1750 */ 1804, 486, 489, 446, 1491, 491, 1490, 1489, 578, 493, + /* 1760 */ 495, 97, 1730, 1756, 1804, 577, 1237, 56, 1724, 184, + /* 1770 */ 502, 1712, 578, 1710, 508, 503, 1804, 1756, 215, 577, + /* 1780 */ 1711, 1709, 321, 1708, 578, 15, 144, 220, 1818, 1756, + /* 1790 */ 1245, 577, 281, 1787, 580, 1789, 1790, 576, 1706, 571, + /* 1800 */ 1698, 226, 1818, 518, 79, 1786, 288, 1787, 580, 1789, + /* 1810 */ 1790, 576, 228, 571, 1818, 1786, 515, 80, 290, 1787, + /* 1820 */ 580, 1789, 1790, 576, 82, 571, 87, 41, 231, 23, + /* 1830 */ 47, 1786, 1433, 1804, 233, 241, 235, 1415, 237, 242, + /* 1840 */ 1417, 578, 16, 1804, 25, 1777, 1756, 151, 577, 240, + /* 1850 */ 24, 578, 46, 1410, 86, 1786, 1756, 17, 577, 1804, + /* 1860 */ 1390, 251, 1389, 1776, 154, 1450, 45, 578, 18, 1439, + /* 1870 */ 1445, 1818, 1756, 13, 577, 282, 1787, 580, 1789, 1790, + /* 1880 */ 576, 1818, 571, 1804, 1444, 291, 1787, 580, 1789, 1790, + /* 1890 */ 576, 578, 571, 332, 1449, 1448, 1756, 1818, 577, 333, + /* 1900 */ 10, 283, 1787, 580, 1789, 1790, 576, 1277, 571, 1352, + /* 1910 */ 19, 1786, 1821, 1307, 1327, 570, 155, 1325, 31, 581, + /* 1920 */ 1324, 1818, 12, 20, 168, 292, 1787, 580, 1789, 1790, + /* 1930 */ 576, 1786, 571, 21, 583, 1140, 341, 585, 579, 1804, + /* 1940 */ 1137, 587, 588, 590, 1134, 591, 593, 578, 596, 1132, + /* 1950 */ 594, 1786, 1756, 1128, 577, 1126, 1131, 597, 1117, 1804, + /* 1960 */ 1130, 88, 1149, 603, 1129, 89, 62, 578, 262, 1145, + /* 1970 */ 612, 1786, 1756, 1071, 577, 1070, 1040, 1818, 1069, 1804, + /* 1980 */ 1067, 284, 1787, 580, 1789, 1790, 576, 578, 571, 1065, + /* 1990 */ 1064, 1786, 1756, 1063, 577, 263, 1086, 1818, 1061, 1804, + /* 2000 */ 1060, 297, 1787, 580, 1789, 1790, 576, 578, 571, 621, + /* 2010 */ 1059, 1058, 1756, 1057, 577, 1056, 1055, 1818, 1083, 1804, + /* 2020 */ 1081, 298, 1787, 580, 1789, 1790, 576, 578, 571, 1052, + /* 2030 */ 1051, 1786, 1756, 1048, 577, 1047, 1046, 1818, 1045, 1524, + /* 2040 */ 641, 1798, 1787, 580, 1789, 1790, 576, 1786, 571, 642, + /* 2050 */ 643, 1522, 645, 646, 647, 1520, 649, 1818, 651, 1804, + /* 2060 */ 650, 1797, 1787, 580, 1789, 1790, 576, 578, 571, 1518, + /* 2070 */ 653, 654, 1756, 655, 577, 1804, 1506, 657, 1002, 1488, + /* 2080 */ 266, 661, 664, 578, 1263, 274, 665, 1463, 1756, 1463, + /* 2090 */ 577, 1463, 1463, 1463, 1463, 1463, 1463, 1818, 1786, 1463, + /* 2100 */ 1463, 1796, 1787, 580, 1789, 1790, 576, 1463, 571, 1463, + /* 2110 */ 1463, 1463, 1463, 1818, 1786, 1463, 1463, 310, 1787, 580, + /* 2120 */ 1789, 1790, 576, 1463, 571, 1463, 1804, 1463, 1463, 1463, + /* 2130 */ 1463, 1463, 1463, 1463, 578, 1463, 1463, 1463, 1463, 1756, + /* 2140 */ 1463, 577, 1804, 1463, 1463, 1463, 1463, 1463, 1463, 1463, + /* 2150 */ 578, 1463, 1463, 1463, 1463, 1756, 1463, 577, 1463, 1463, + /* 2160 */ 1463, 1463, 1463, 1463, 1818, 1786, 1463, 1463, 309, 1787, + /* 2170 */ 580, 1789, 1790, 576, 1463, 571, 1463, 1463, 1463, 1463, + /* 2180 */ 1818, 1786, 1463, 1463, 311, 1787, 580, 1789, 1790, 576, + /* 2190 */ 1463, 571, 1463, 1804, 1463, 1463, 1463, 1463, 1463, 1463, + /* 2200 */ 1463, 578, 1463, 1463, 1463, 1463, 1756, 1463, 577, 1804, + /* 2210 */ 1463, 1463, 1463, 1463, 1463, 1463, 1463, 578, 1463, 1463, + /* 2220 */ 1463, 1463, 1756, 1463, 577, 1463, 1463, 1463, 1463, 1463, + /* 2230 */ 1463, 1818, 1463, 1463, 1463, 308, 1787, 580, 1789, 1790, + /* 2240 */ 576, 1463, 571, 1463, 1463, 1463, 1463, 1818, 1463, 1463, + /* 2250 */ 1463, 287, 1787, 580, 1789, 1790, 576, 1463, 571, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 316, 390, 391, 316, 316, 312, 316, 314, 315, 1, - /* 10 */ 2, 327, 12, 13, 327, 350, 351, 327, 334, 364, - /* 20 */ 20, 0, 22, 4, 334, 20, 20, 22, 344, 308, - /* 30 */ 338, 344, 344, 33, 344, 35, 312, 20, 314, 315, - /* 40 */ 35, 349, 21, 338, 20, 24, 25, 26, 27, 28, - /* 50 */ 29, 30, 31, 32, 349, 50, 56, 336, 321, 4, - /* 60 */ 405, 61, 325, 44, 45, 344, 374, 375, 68, 60, - /* 70 */ 349, 383, 351, 418, 12, 13, 14, 422, 386, 374, - /* 80 */ 375, 376, 20, 0, 22, 397, 398, 399, 400, 89, - /* 90 */ 402, 386, 336, 322, 323, 33, 375, 35, 90, 343, - /* 100 */ 379, 380, 381, 382, 383, 384, 21, 386, 352, 347, - /* 110 */ 389, 111, 350, 351, 393, 394, 395, 351, 56, 34, - /* 120 */ 405, 36, 20, 61, 358, 125, 126, 361, 407, 20, - /* 130 */ 68, 125, 126, 418, 8, 9, 415, 422, 12, 13, - /* 140 */ 14, 15, 16, 307, 89, 309, 63, 64, 65, 66, + /* 0 */ 312, 404, 314, 315, 337, 312, 316, 314, 315, 351, + /* 10 */ 389, 390, 12, 13, 417, 328, 358, 327, 421, 361, + /* 20 */ 20, 0, 22, 336, 334, 12, 13, 14, 15, 16, + /* 30 */ 308, 20, 345, 33, 344, 35, 347, 20, 20, 350, + /* 40 */ 351, 364, 21, 338, 316, 24, 25, 26, 27, 28, + /* 50 */ 29, 30, 31, 32, 349, 327, 56, 313, 336, 4, + /* 60 */ 316, 61, 334, 14, 15, 16, 344, 307, 68, 309, + /* 70 */ 60, 349, 344, 351, 12, 13, 14, 338, 373, 374, + /* 80 */ 375, 404, 20, 0, 22, 336, 56, 20, 349, 89, + /* 90 */ 385, 4, 336, 344, 417, 33, 374, 35, 421, 343, + /* 100 */ 378, 379, 380, 381, 382, 383, 89, 385, 352, 335, + /* 110 */ 388, 111, 373, 374, 392, 393, 394, 351, 56, 89, + /* 120 */ 404, 91, 348, 61, 385, 125, 126, 361, 406, 380, + /* 130 */ 68, 44, 45, 417, 8, 9, 414, 421, 12, 13, + /* 140 */ 14, 15, 16, 20, 89, 89, 63, 64, 65, 66, /* 150 */ 67, 89, 69, 70, 71, 72, 73, 74, 75, 76, /* 160 */ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, /* 170 */ 170, 21, 172, 111, 24, 25, 26, 27, 28, 29, - /* 180 */ 30, 31, 32, 336, 14, 15, 16, 125, 126, 120, - /* 190 */ 121, 344, 316, 193, 194, 60, 196, 197, 198, 199, + /* 180 */ 30, 31, 32, 313, 338, 20, 316, 125, 126, 120, + /* 190 */ 121, 1, 2, 193, 194, 349, 196, 197, 198, 199, /* 200 */ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, - /* 210 */ 210, 211, 212, 213, 0, 20, 90, 8, 9, 64, - /* 220 */ 65, 12, 13, 14, 15, 16, 71, 227, 381, 353, - /* 230 */ 305, 120, 170, 318, 172, 22, 81, 82, 24, 25, - /* 240 */ 26, 27, 28, 29, 30, 31, 32, 178, 35, 20, - /* 250 */ 181, 22, 43, 56, 339, 193, 194, 308, 196, 197, + /* 210 */ 210, 211, 212, 213, 0, 21, 90, 8, 9, 373, + /* 220 */ 374, 12, 13, 14, 15, 16, 316, 227, 34, 321, + /* 230 */ 36, 385, 170, 325, 172, 22, 121, 327, 24, 25, + /* 240 */ 26, 27, 28, 29, 30, 31, 32, 178, 35, 89, + /* 250 */ 181, 360, 43, 362, 344, 193, 194, 227, 196, 197, /* 260 */ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, - /* 270 */ 208, 209, 210, 211, 212, 213, 12, 13, 308, 50, - /* 280 */ 155, 68, 227, 316, 20, 88, 22, 308, 91, 364, - /* 290 */ 179, 180, 0, 308, 327, 121, 364, 33, 349, 35, + /* 270 */ 208, 209, 210, 211, 212, 213, 12, 13, 350, 351, + /* 280 */ 90, 68, 227, 227, 20, 20, 22, 60, 20, 89, + /* 290 */ 125, 126, 177, 178, 308, 308, 181, 33, 193, 35, /* 300 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - /* 310 */ 112, 344, 114, 115, 116, 117, 118, 119, 405, 349, - /* 320 */ 56, 336, 0, 68, 111, 61, 336, 405, 349, 344, - /* 330 */ 405, 418, 68, 343, 349, 422, 351, 405, 12, 13, - /* 340 */ 418, 4, 352, 418, 422, 20, 20, 422, 22, 316, - /* 350 */ 418, 177, 178, 89, 422, 181, 19, 232, 233, 33, - /* 360 */ 375, 35, 344, 335, 379, 380, 381, 382, 383, 384, - /* 370 */ 33, 386, 89, 355, 389, 111, 348, 344, 393, 394, - /* 380 */ 395, 20, 56, 170, 47, 172, 77, 100, 51, 125, - /* 390 */ 126, 8, 9, 56, 68, 12, 13, 14, 15, 16, - /* 400 */ 415, 322, 323, 12, 13, 14, 193, 194, 308, 318, - /* 410 */ 68, 20, 308, 22, 89, 89, 383, 12, 13, 14, - /* 420 */ 15, 16, 331, 101, 33, 88, 35, 337, 91, 89, - /* 430 */ 339, 398, 399, 400, 170, 402, 172, 111, 129, 130, - /* 440 */ 336, 364, 120, 121, 122, 123, 124, 56, 344, 349, - /* 450 */ 89, 125, 126, 349, 37, 351, 308, 193, 194, 68, + /* 310 */ 112, 328, 114, 115, 116, 117, 118, 119, 20, 336, + /* 320 */ 56, 0, 336, 56, 111, 61, 322, 323, 345, 169, + /* 330 */ 344, 171, 68, 336, 404, 349, 349, 351, 12, 13, + /* 340 */ 343, 236, 237, 238, 239, 240, 20, 417, 22, 352, + /* 350 */ 0, 421, 318, 89, 89, 88, 336, 89, 91, 33, + /* 360 */ 374, 35, 68, 343, 378, 379, 380, 381, 382, 383, + /* 370 */ 100, 385, 352, 339, 388, 111, 328, 337, 392, 393, + /* 380 */ 394, 20, 56, 170, 336, 172, 20, 227, 22, 125, + /* 390 */ 126, 8, 9, 345, 68, 12, 13, 14, 15, 16, + /* 400 */ 414, 404, 316, 12, 13, 14, 193, 194, 308, 308, + /* 410 */ 101, 20, 308, 22, 417, 89, 50, 14, 421, 322, + /* 420 */ 323, 35, 101, 20, 33, 68, 35, 227, 316, 120, + /* 430 */ 121, 122, 123, 124, 170, 344, 172, 111, 308, 353, + /* 440 */ 336, 120, 121, 122, 123, 124, 355, 56, 344, 349, + /* 450 */ 349, 125, 126, 349, 68, 351, 344, 193, 194, 68, /* 460 */ 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, - /* 470 */ 206, 207, 208, 209, 210, 211, 212, 213, 89, 375, - /* 480 */ 89, 320, 405, 379, 380, 381, 382, 383, 384, 385, - /* 490 */ 386, 387, 388, 20, 333, 418, 170, 349, 172, 422, - /* 500 */ 8, 9, 111, 342, 12, 13, 14, 15, 16, 92, - /* 510 */ 227, 94, 95, 308, 97, 364, 125, 126, 101, 193, - /* 520 */ 194, 20, 196, 197, 198, 199, 200, 201, 202, 203, + /* 470 */ 206, 207, 208, 209, 210, 211, 212, 213, 374, 349, + /* 480 */ 89, 337, 378, 379, 380, 381, 382, 383, 384, 385, + /* 490 */ 386, 387, 227, 320, 382, 227, 170, 120, 172, 8, + /* 500 */ 9, 0, 111, 12, 13, 14, 15, 16, 396, 397, + /* 510 */ 398, 399, 337, 401, 308, 342, 125, 126, 157, 193, + /* 520 */ 194, 0, 196, 197, 198, 199, 200, 201, 202, 203, /* 530 */ 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, - /* 540 */ 123, 44, 45, 8, 9, 162, 152, 12, 13, 14, - /* 550 */ 15, 16, 227, 227, 349, 316, 405, 337, 169, 336, - /* 560 */ 171, 170, 89, 172, 2, 0, 327, 227, 345, 418, - /* 570 */ 8, 9, 56, 422, 12, 13, 14, 15, 16, 56, - /* 580 */ 308, 308, 90, 344, 193, 194, 337, 196, 197, 198, + /* 540 */ 360, 316, 362, 8, 9, 162, 318, 12, 13, 14, + /* 550 */ 15, 16, 327, 227, 37, 349, 179, 180, 324, 331, + /* 560 */ 326, 170, 376, 172, 2, 64, 65, 339, 305, 344, + /* 570 */ 8, 9, 71, 0, 12, 13, 14, 15, 16, 337, + /* 580 */ 308, 90, 81, 82, 193, 194, 400, 196, 197, 198, /* 590 */ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, - /* 600 */ 209, 210, 211, 212, 213, 12, 13, 91, 214, 336, - /* 610 */ 364, 377, 89, 20, 91, 22, 227, 344, 224, 308, - /* 620 */ 368, 349, 349, 165, 351, 90, 33, 2, 35, 64, - /* 630 */ 65, 308, 249, 8, 9, 401, 71, 12, 13, 14, - /* 640 */ 15, 16, 184, 185, 14, 337, 81, 82, 375, 56, - /* 650 */ 20, 405, 379, 380, 381, 382, 383, 384, 157, 386, - /* 660 */ 349, 68, 389, 351, 418, 308, 393, 394, 422, 35, - /* 670 */ 8, 9, 349, 361, 12, 13, 14, 15, 16, 19, - /* 680 */ 8, 9, 89, 316, 12, 13, 14, 15, 16, 308, - /* 690 */ 308, 8, 9, 33, 327, 12, 13, 14, 15, 16, - /* 700 */ 227, 360, 68, 362, 111, 313, 349, 47, 316, 344, - /* 710 */ 337, 344, 52, 53, 54, 55, 56, 0, 125, 126, - /* 720 */ 355, 8, 9, 61, 337, 12, 13, 14, 15, 16, - /* 730 */ 349, 349, 12, 13, 316, 337, 316, 18, 308, 20, - /* 740 */ 20, 316, 22, 316, 61, 327, 27, 327, 88, 30, - /* 750 */ 227, 91, 90, 33, 327, 35, 313, 0, 320, 316, - /* 760 */ 98, 14, 344, 170, 344, 172, 47, 20, 49, 344, - /* 770 */ 51, 344, 308, 360, 338, 362, 56, 225, 226, 349, - /* 780 */ 342, 98, 42, 43, 124, 349, 193, 194, 68, 196, + /* 600 */ 209, 210, 211, 212, 213, 12, 13, 101, 336, 92, + /* 610 */ 336, 94, 95, 20, 97, 22, 344, 152, 101, 345, + /* 620 */ 316, 349, 101, 351, 2, 90, 33, 364, 35, 123, + /* 630 */ 8, 9, 249, 60, 12, 13, 14, 15, 16, 155, + /* 640 */ 123, 120, 121, 122, 123, 124, 374, 344, 344, 56, + /* 650 */ 378, 379, 380, 381, 382, 383, 337, 385, 355, 320, + /* 660 */ 388, 68, 316, 346, 392, 393, 349, 404, 364, 364, + /* 670 */ 8, 9, 333, 327, 12, 13, 14, 15, 16, 214, + /* 680 */ 417, 342, 89, 308, 421, 20, 382, 22, 39, 224, + /* 690 */ 344, 8, 9, 308, 325, 12, 13, 14, 15, 16, + /* 700 */ 35, 397, 398, 399, 111, 401, 8, 9, 404, 404, + /* 710 */ 12, 13, 14, 15, 16, 50, 232, 233, 125, 126, + /* 720 */ 316, 417, 417, 61, 349, 421, 421, 43, 4, 14, + /* 730 */ 316, 327, 12, 13, 349, 20, 308, 18, 376, 20, + /* 740 */ 20, 327, 22, 19, 61, 316, 27, 337, 344, 30, + /* 750 */ 316, 316, 90, 33, 309, 35, 327, 33, 344, 3, + /* 760 */ 98, 327, 400, 170, 165, 172, 47, 308, 49, 308, + /* 770 */ 51, 47, 424, 344, 90, 51, 56, 349, 344, 344, + /* 780 */ 56, 98, 308, 184, 185, 77, 193, 194, 68, 196, /* 790 */ 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, - /* 800 */ 207, 208, 209, 210, 211, 212, 213, 88, 383, 89, - /* 810 */ 374, 375, 376, 349, 152, 39, 156, 60, 101, 100, - /* 820 */ 89, 316, 386, 398, 399, 400, 336, 402, 193, 157, - /* 830 */ 99, 111, 327, 343, 174, 152, 176, 120, 121, 122, - /* 840 */ 123, 124, 352, 309, 377, 125, 126, 128, 308, 344, + /* 800 */ 207, 208, 209, 210, 211, 212, 213, 88, 349, 89, + /* 810 */ 349, 4, 88, 316, 152, 91, 376, 382, 368, 100, + /* 820 */ 20, 8, 9, 349, 327, 12, 13, 14, 15, 16, + /* 830 */ 308, 111, 397, 398, 399, 152, 401, 129, 130, 0, + /* 840 */ 400, 344, 14, 44, 45, 125, 126, 128, 20, 0, /* 850 */ 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, - /* 860 */ 141, 142, 143, 144, 145, 146, 147, 148, 401, 150, - /* 870 */ 151, 236, 237, 238, 239, 240, 214, 215, 216, 217, - /* 880 */ 218, 219, 220, 221, 222, 223, 224, 14, 3, 349, - /* 890 */ 170, 346, 172, 20, 349, 405, 4, 214, 215, 216, - /* 900 */ 217, 218, 219, 220, 221, 222, 223, 224, 418, 0, - /* 910 */ 308, 308, 422, 193, 194, 308, 196, 197, 198, 199, + /* 860 */ 141, 142, 143, 144, 145, 146, 147, 148, 316, 150, + /* 870 */ 151, 349, 336, 13, 308, 226, 214, 215, 216, 217, + /* 880 */ 218, 219, 220, 221, 222, 223, 224, 48, 352, 317, + /* 890 */ 170, 346, 172, 20, 349, 35, 344, 214, 215, 216, + /* 900 */ 217, 218, 219, 220, 221, 222, 223, 224, 329, 60, + /* 910 */ 415, 332, 197, 193, 194, 349, 196, 197, 198, 199, /* 920 */ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, - /* 930 */ 210, 211, 212, 213, 324, 18, 326, 308, 336, 316, - /* 940 */ 23, 101, 346, 425, 197, 349, 344, 377, 308, 308, - /* 950 */ 327, 349, 349, 351, 37, 38, 349, 101, 41, 20, - /* 960 */ 120, 121, 122, 123, 124, 336, 364, 344, 20, 60, - /* 970 */ 325, 401, 43, 344, 57, 58, 59, 375, 349, 123, - /* 980 */ 351, 379, 380, 381, 382, 383, 384, 416, 386, 349, - /* 990 */ 349, 389, 338, 364, 317, 393, 394, 329, 157, 158, - /* 1000 */ 332, 336, 226, 349, 375, 316, 89, 405, 379, 380, - /* 1010 */ 381, 382, 383, 384, 316, 386, 327, 352, 389, 90, - /* 1020 */ 418, 0, 393, 394, 422, 327, 0, 93, 374, 375, - /* 1030 */ 96, 12, 13, 344, 405, 125, 126, 43, 316, 316, - /* 1040 */ 386, 22, 344, 35, 127, 316, 316, 418, 22, 327, - /* 1050 */ 327, 422, 33, 43, 35, 328, 327, 327, 316, 93, - /* 1060 */ 316, 308, 96, 336, 371, 316, 344, 344, 47, 327, - /* 1070 */ 197, 327, 345, 344, 344, 56, 327, 43, 43, 162, - /* 1080 */ 163, 164, 328, 316, 167, 35, 344, 68, 344, 336, - /* 1090 */ 336, 43, 412, 344, 327, 93, 157, 344, 96, 345, - /* 1100 */ 183, 61, 349, 186, 351, 188, 189, 190, 191, 192, - /* 1110 */ 328, 344, 46, 93, 43, 43, 96, 336, 336, 0, - /* 1120 */ 228, 0, 43, 308, 90, 90, 43, 345, 375, 244, - /* 1130 */ 111, 43, 379, 380, 381, 382, 383, 384, 90, 386, - /* 1140 */ 43, 22, 389, 22, 227, 197, 393, 394, 395, 1, - /* 1150 */ 2, 336, 13, 43, 13, 89, 317, 404, 43, 344, - /* 1160 */ 43, 90, 90, 315, 349, 348, 351, 43, 43, 90, - /* 1170 */ 43, 378, 403, 90, 35, 308, 35, 419, 90, 396, - /* 1180 */ 172, 419, 419, 406, 229, 373, 372, 90, 35, 170, - /* 1190 */ 375, 172, 47, 168, 379, 380, 381, 382, 383, 384, - /* 1200 */ 90, 386, 366, 336, 389, 90, 0, 90, 393, 394, - /* 1210 */ 395, 344, 193, 194, 90, 90, 349, 90, 351, 404, - /* 1220 */ 42, 68, 172, 20, 205, 206, 207, 208, 209, 210, - /* 1230 */ 211, 308, 356, 193, 316, 316, 356, 152, 354, 20, - /* 1240 */ 246, 308, 375, 354, 316, 310, 379, 380, 381, 382, - /* 1250 */ 383, 384, 316, 386, 48, 310, 389, 316, 248, 336, - /* 1260 */ 393, 394, 395, 20, 370, 351, 320, 344, 20, 336, - /* 1270 */ 320, 404, 349, 20, 351, 363, 365, 344, 320, 363, - /* 1280 */ 320, 320, 349, 320, 351, 316, 310, 364, 320, 336, - /* 1290 */ 308, 310, 336, 336, 316, 336, 349, 364, 375, 369, - /* 1300 */ 336, 336, 379, 380, 381, 382, 383, 384, 375, 386, - /* 1310 */ 336, 336, 379, 380, 381, 382, 383, 384, 336, 386, - /* 1320 */ 336, 336, 336, 318, 370, 175, 344, 318, 405, 316, - /* 1330 */ 351, 349, 316, 351, 363, 318, 234, 349, 405, 359, - /* 1340 */ 349, 418, 349, 154, 349, 422, 359, 349, 318, 357, - /* 1350 */ 308, 418, 20, 349, 332, 422, 318, 375, 344, 359, - /* 1360 */ 308, 379, 380, 381, 382, 383, 384, 378, 386, 235, - /* 1370 */ 359, 389, 349, 411, 241, 393, 394, 349, 336, 161, - /* 1380 */ 349, 411, 349, 243, 242, 413, 344, 414, 336, 230, - /* 1390 */ 226, 349, 20, 351, 411, 344, 344, 373, 247, 245, - /* 1400 */ 410, 349, 89, 351, 377, 409, 250, 408, 89, 308, - /* 1410 */ 340, 349, 326, 318, 316, 36, 311, 375, 421, 310, - /* 1420 */ 367, 379, 380, 381, 382, 383, 384, 375, 386, 426, - /* 1430 */ 362, 379, 380, 381, 382, 383, 384, 336, 386, 421, - /* 1440 */ 0, 389, 341, 330, 330, 344, 394, 392, 330, 420, - /* 1450 */ 349, 319, 351, 421, 0, 306, 420, 420, 177, 0, - /* 1460 */ 308, 0, 42, 0, 35, 423, 424, 187, 35, 35, - /* 1470 */ 35, 187, 0, 35, 35, 187, 375, 0, 0, 187, - /* 1480 */ 379, 380, 381, 382, 383, 384, 35, 386, 336, 0, - /* 1490 */ 22, 0, 35, 341, 172, 170, 344, 0, 0, 166, - /* 1500 */ 165, 349, 46, 351, 0, 0, 0, 42, 0, 0, - /* 1510 */ 0, 149, 308, 0, 0, 0, 0, 0, 144, 35, - /* 1520 */ 0, 144, 308, 42, 0, 0, 0, 375, 0, 0, - /* 1530 */ 0, 379, 380, 381, 382, 383, 384, 0, 386, 308, - /* 1540 */ 336, 0, 0, 0, 0, 0, 0, 0, 344, 0, - /* 1550 */ 336, 0, 0, 349, 0, 351, 0, 0, 344, 0, - /* 1560 */ 22, 0, 0, 349, 0, 351, 0, 336, 56, 0, - /* 1570 */ 0, 0, 39, 42, 56, 344, 0, 43, 46, 375, - /* 1580 */ 349, 14, 351, 379, 380, 381, 382, 383, 384, 375, - /* 1590 */ 386, 161, 14, 379, 380, 381, 382, 383, 384, 308, - /* 1600 */ 386, 46, 0, 40, 39, 0, 375, 0, 0, 0, - /* 1610 */ 379, 380, 381, 382, 383, 384, 308, 386, 39, 388, - /* 1620 */ 0, 417, 62, 0, 0, 35, 39, 336, 0, 47, - /* 1630 */ 35, 47, 341, 39, 0, 344, 35, 39, 424, 47, - /* 1640 */ 349, 0, 351, 47, 336, 35, 0, 0, 0, 341, - /* 1650 */ 39, 0, 344, 35, 22, 0, 98, 349, 43, 351, - /* 1660 */ 35, 35, 43, 0, 96, 35, 375, 22, 0, 0, - /* 1670 */ 379, 380, 381, 382, 383, 384, 22, 386, 22, 22, - /* 1680 */ 49, 0, 0, 375, 308, 35, 33, 379, 380, 381, - /* 1690 */ 382, 383, 384, 35, 386, 0, 35, 22, 20, 0, - /* 1700 */ 47, 35, 308, 0, 154, 52, 53, 54, 55, 56, - /* 1710 */ 157, 22, 336, 173, 0, 0, 0, 0, 0, 90, - /* 1720 */ 344, 35, 89, 0, 89, 349, 89, 351, 0, 157, - /* 1730 */ 336, 157, 159, 39, 46, 155, 43, 99, 344, 153, - /* 1740 */ 43, 88, 231, 349, 91, 351, 46, 43, 89, 43, - /* 1750 */ 231, 375, 90, 89, 89, 379, 380, 381, 382, 383, - /* 1760 */ 384, 308, 386, 90, 90, 182, 89, 46, 89, 375, - /* 1770 */ 90, 46, 89, 379, 380, 381, 382, 383, 384, 89, - /* 1780 */ 386, 90, 90, 43, 46, 89, 43, 46, 90, 336, - /* 1790 */ 35, 90, 90, 35, 35, 35, 35, 344, 35, 2, - /* 1800 */ 225, 22, 349, 90, 351, 193, 153, 154, 231, 156, - /* 1810 */ 308, 43, 90, 160, 90, 46, 89, 46, 89, 89, - /* 1820 */ 89, 89, 22, 89, 35, 90, 35, 90, 375, 176, - /* 1830 */ 35, 308, 379, 380, 381, 382, 383, 384, 336, 386, - /* 1840 */ 89, 89, 100, 90, 195, 35, 344, 90, 35, 35, - /* 1850 */ 22, 349, 90, 351, 89, 89, 89, 113, 113, 336, - /* 1860 */ 113, 113, 89, 89, 43, 101, 35, 344, 22, 89, - /* 1870 */ 62, 35, 349, 61, 351, 35, 35, 375, 35, 35, - /* 1880 */ 35, 379, 380, 381, 382, 383, 384, 35, 386, 308, - /* 1890 */ 68, 87, 43, 35, 35, 22, 35, 22, 375, 308, - /* 1900 */ 35, 35, 379, 380, 381, 382, 383, 384, 22, 386, - /* 1910 */ 68, 35, 35, 35, 35, 35, 35, 336, 0, 35, - /* 1920 */ 0, 47, 39, 35, 39, 344, 0, 336, 35, 47, - /* 1930 */ 349, 39, 351, 0, 47, 344, 35, 39, 47, 0, - /* 1940 */ 349, 35, 351, 0, 35, 22, 21, 427, 427, 22, - /* 1950 */ 22, 308, 21, 427, 20, 427, 375, 427, 427, 427, - /* 1960 */ 379, 380, 381, 382, 383, 384, 375, 386, 308, 427, - /* 1970 */ 379, 380, 381, 382, 383, 384, 427, 386, 427, 336, - /* 1980 */ 427, 427, 427, 427, 427, 427, 427, 344, 427, 427, - /* 1990 */ 427, 427, 349, 427, 351, 427, 336, 427, 427, 427, - /* 2000 */ 427, 427, 427, 427, 344, 427, 427, 427, 427, 349, - /* 2010 */ 427, 351, 427, 427, 427, 427, 427, 308, 375, 427, - /* 2020 */ 427, 427, 379, 380, 381, 382, 383, 384, 427, 386, - /* 2030 */ 427, 427, 427, 427, 308, 375, 427, 427, 427, 379, - /* 2040 */ 380, 381, 382, 383, 384, 336, 386, 427, 427, 427, - /* 2050 */ 427, 427, 427, 344, 427, 427, 427, 427, 349, 427, - /* 2060 */ 351, 427, 336, 427, 427, 427, 427, 427, 427, 427, - /* 2070 */ 344, 427, 427, 427, 427, 349, 427, 351, 427, 427, - /* 2080 */ 427, 427, 427, 308, 375, 427, 427, 427, 379, 380, - /* 2090 */ 381, 382, 383, 384, 427, 386, 427, 427, 427, 427, - /* 2100 */ 427, 375, 427, 427, 427, 379, 380, 381, 382, 383, - /* 2110 */ 384, 336, 386, 427, 427, 427, 427, 427, 427, 344, - /* 2120 */ 427, 427, 427, 427, 349, 427, 351, 427, 427, 427, - /* 2130 */ 427, 427, 427, 427, 427, 427, 427, 308, 427, 427, - /* 2140 */ 427, 427, 427, 427, 427, 427, 427, 308, 427, 427, - /* 2150 */ 375, 427, 427, 427, 379, 380, 381, 382, 383, 384, - /* 2160 */ 427, 386, 427, 427, 427, 336, 427, 427, 427, 427, - /* 2170 */ 427, 427, 427, 344, 427, 336, 427, 427, 349, 427, - /* 2180 */ 351, 427, 427, 344, 427, 427, 427, 427, 349, 427, - /* 2190 */ 351, 427, 427, 427, 427, 427, 427, 427, 308, 427, - /* 2200 */ 427, 427, 427, 427, 375, 427, 427, 427, 379, 380, - /* 2210 */ 381, 382, 383, 384, 375, 386, 308, 427, 379, 380, - /* 2220 */ 381, 382, 383, 384, 427, 386, 336, 427, 427, 427, - /* 2230 */ 427, 427, 427, 427, 344, 427, 427, 427, 427, 349, - /* 2240 */ 427, 351, 427, 427, 336, 427, 427, 427, 427, 427, - /* 2250 */ 427, 427, 344, 427, 427, 427, 427, 349, 427, 351, - /* 2260 */ 427, 427, 427, 427, 427, 375, 427, 427, 427, 379, - /* 2270 */ 380, 381, 382, 383, 384, 308, 386, 427, 427, 427, - /* 2280 */ 427, 427, 427, 375, 427, 427, 427, 379, 380, 381, - /* 2290 */ 382, 383, 384, 427, 386, 427, 308, 427, 427, 427, - /* 2300 */ 427, 427, 427, 336, 427, 427, 427, 427, 427, 427, - /* 2310 */ 427, 344, 427, 427, 427, 427, 349, 427, 351, 427, - /* 2320 */ 427, 427, 427, 427, 336, 427, 427, 427, 427, 427, - /* 2330 */ 427, 427, 344, 427, 427, 427, 427, 349, 427, 351, - /* 2340 */ 427, 427, 375, 427, 427, 427, 379, 380, 381, 382, - /* 2350 */ 383, 384, 308, 386, 427, 427, 427, 427, 427, 427, - /* 2360 */ 427, 427, 308, 375, 427, 427, 427, 379, 380, 381, - /* 2370 */ 382, 383, 384, 427, 386, 427, 427, 427, 427, 427, - /* 2380 */ 336, 427, 427, 427, 427, 427, 427, 427, 344, 427, - /* 2390 */ 336, 427, 427, 349, 316, 351, 427, 427, 344, 427, - /* 2400 */ 427, 427, 427, 349, 427, 351, 427, 427, 427, 427, - /* 2410 */ 427, 427, 427, 427, 427, 427, 427, 427, 427, 375, - /* 2420 */ 427, 427, 344, 379, 380, 381, 382, 383, 384, 375, - /* 2430 */ 386, 427, 427, 379, 380, 381, 382, 383, 384, 427, - /* 2440 */ 386, 316, 364, 427, 427, 427, 427, 427, 427, 427, - /* 2450 */ 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, - /* 2460 */ 427, 383, 427, 427, 427, 427, 427, 427, 427, 344, - /* 2470 */ 427, 427, 427, 427, 427, 427, 398, 399, 400, 427, - /* 2480 */ 402, 427, 427, 405, 427, 427, 427, 427, 427, 364, - /* 2490 */ 427, 427, 427, 427, 427, 427, 418, 427, 427, 427, - /* 2500 */ 422, 427, 427, 427, 427, 427, 427, 427, 383, 427, - /* 2510 */ 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, - /* 2520 */ 427, 427, 427, 398, 399, 400, 427, 402, 427, 427, - /* 2530 */ 405, 427, 427, 427, 427, 427, 427, 427, 427, 427, - /* 2540 */ 427, 427, 427, 418, 427, 427, 427, 422, + /* 930 */ 210, 211, 212, 213, 382, 18, 308, 308, 43, 364, + /* 940 */ 23, 364, 225, 226, 0, 64, 65, 0, 338, 397, + /* 950 */ 398, 399, 71, 401, 37, 38, 336, 157, 41, 349, + /* 960 */ 316, 316, 81, 82, 308, 336, 42, 43, 411, 22, + /* 970 */ 157, 327, 327, 344, 57, 58, 59, 349, 349, 404, + /* 980 */ 351, 404, 315, 373, 374, 375, 308, 308, 344, 344, + /* 990 */ 316, 47, 417, 364, 417, 385, 421, 56, 421, 364, + /* 1000 */ 244, 327, 308, 374, 308, 349, 89, 378, 379, 380, + /* 1010 */ 381, 382, 383, 316, 385, 336, 43, 388, 344, 12, + /* 1020 */ 13, 392, 393, 344, 327, 197, 316, 349, 349, 22, + /* 1030 */ 351, 93, 91, 404, 96, 228, 348, 327, 316, 404, + /* 1040 */ 33, 344, 35, 349, 127, 349, 417, 0, 308, 327, + /* 1050 */ 421, 317, 417, 374, 344, 308, 421, 378, 379, 380, + /* 1060 */ 381, 382, 383, 56, 385, 316, 344, 388, 43, 22, + /* 1070 */ 197, 392, 393, 394, 316, 68, 327, 157, 158, 162, + /* 1080 */ 163, 164, 403, 336, 167, 327, 43, 93, 316, 349, + /* 1090 */ 96, 344, 93, 344, 93, 96, 349, 96, 351, 327, + /* 1100 */ 183, 0, 344, 186, 377, 188, 189, 190, 191, 192, + /* 1110 */ 35, 364, 43, 316, 308, 90, 344, 35, 111, 61, + /* 1120 */ 316, 374, 43, 22, 327, 378, 379, 380, 381, 382, + /* 1130 */ 383, 327, 385, 90, 43, 388, 43, 402, 19, 392, + /* 1140 */ 393, 344, 336, 248, 227, 43, 43, 341, 344, 43, + /* 1150 */ 344, 404, 33, 89, 308, 349, 13, 351, 43, 90, + /* 1160 */ 46, 1, 2, 99, 417, 395, 47, 405, 421, 90, + /* 1170 */ 418, 52, 53, 54, 55, 56, 43, 170, 35, 172, + /* 1180 */ 374, 90, 336, 90, 378, 379, 380, 381, 382, 383, + /* 1190 */ 344, 385, 90, 90, 308, 349, 90, 351, 125, 126, + /* 1200 */ 193, 194, 418, 89, 418, 90, 43, 88, 43, 229, + /* 1210 */ 91, 35, 205, 206, 207, 208, 209, 210, 211, 246, + /* 1220 */ 374, 47, 336, 90, 378, 379, 380, 381, 382, 383, + /* 1230 */ 344, 385, 366, 372, 388, 349, 43, 351, 392, 393, + /* 1240 */ 394, 43, 316, 124, 68, 371, 168, 172, 42, 403, + /* 1250 */ 356, 193, 20, 90, 172, 90, 316, 356, 316, 152, + /* 1260 */ 374, 354, 354, 308, 378, 379, 380, 381, 382, 383, + /* 1270 */ 344, 385, 316, 316, 388, 156, 316, 20, 392, 393, + /* 1280 */ 394, 310, 310, 90, 20, 370, 320, 351, 90, 403, + /* 1290 */ 364, 336, 20, 174, 320, 176, 363, 20, 320, 344, + /* 1300 */ 365, 320, 363, 320, 349, 320, 351, 316, 382, 320, + /* 1310 */ 310, 336, 336, 336, 316, 336, 310, 308, 336, 364, + /* 1320 */ 336, 370, 336, 397, 398, 399, 336, 401, 336, 374, + /* 1330 */ 404, 336, 336, 378, 379, 380, 381, 382, 383, 318, + /* 1340 */ 385, 175, 349, 417, 318, 336, 316, 421, 351, 316, + /* 1350 */ 318, 363, 234, 344, 349, 154, 369, 349, 349, 404, + /* 1360 */ 351, 359, 349, 349, 349, 20, 318, 308, 357, 359, + /* 1370 */ 332, 318, 417, 364, 235, 344, 421, 349, 410, 359, + /* 1380 */ 377, 359, 241, 374, 308, 349, 349, 378, 379, 380, + /* 1390 */ 381, 382, 383, 349, 385, 336, 161, 243, 349, 242, + /* 1400 */ 230, 372, 20, 344, 226, 344, 247, 245, 349, 89, + /* 1410 */ 351, 376, 336, 404, 410, 89, 349, 250, 326, 36, + /* 1420 */ 344, 340, 318, 311, 412, 349, 417, 351, 410, 413, + /* 1430 */ 421, 407, 409, 374, 308, 408, 419, 378, 379, 380, + /* 1440 */ 381, 382, 383, 391, 385, 316, 310, 388, 362, 319, + /* 1450 */ 374, 392, 393, 419, 378, 379, 380, 381, 382, 383, + /* 1460 */ 308, 385, 336, 367, 425, 420, 420, 330, 330, 419, + /* 1470 */ 344, 420, 330, 0, 306, 349, 0, 351, 177, 0, + /* 1480 */ 0, 42, 0, 35, 187, 35, 35, 35, 336, 187, + /* 1490 */ 0, 35, 35, 341, 187, 0, 344, 187, 422, 423, + /* 1500 */ 374, 349, 0, 351, 378, 379, 380, 381, 382, 383, + /* 1510 */ 308, 385, 35, 0, 388, 22, 0, 170, 35, 393, + /* 1520 */ 172, 0, 308, 166, 165, 0, 374, 0, 0, 0, + /* 1530 */ 378, 379, 380, 381, 382, 383, 308, 385, 336, 0, + /* 1540 */ 46, 42, 0, 0, 149, 0, 344, 144, 0, 35, + /* 1550 */ 336, 349, 0, 351, 0, 0, 0, 144, 344, 0, + /* 1560 */ 0, 0, 0, 349, 336, 351, 0, 0, 0, 0, + /* 1570 */ 0, 0, 344, 0, 0, 0, 374, 349, 0, 351, + /* 1580 */ 378, 379, 380, 381, 382, 383, 42, 385, 374, 308, + /* 1590 */ 0, 0, 378, 379, 380, 381, 382, 383, 0, 385, + /* 1600 */ 0, 0, 374, 22, 0, 0, 378, 379, 380, 381, + /* 1610 */ 382, 383, 0, 385, 0, 387, 0, 336, 416, 0, + /* 1620 */ 56, 0, 341, 39, 42, 344, 33, 0, 56, 0, + /* 1630 */ 349, 43, 351, 46, 14, 46, 14, 423, 0, 40, + /* 1640 */ 47, 39, 308, 35, 47, 52, 53, 54, 55, 56, + /* 1650 */ 0, 0, 0, 161, 308, 374, 39, 0, 0, 378, + /* 1660 */ 379, 380, 381, 382, 383, 0, 385, 0, 308, 62, + /* 1670 */ 336, 0, 39, 0, 35, 341, 39, 47, 344, 0, + /* 1680 */ 35, 88, 336, 349, 91, 351, 39, 47, 0, 35, + /* 1690 */ 344, 47, 39, 0, 0, 349, 336, 351, 0, 0, + /* 1700 */ 22, 35, 96, 0, 344, 35, 22, 98, 374, 349, + /* 1710 */ 0, 351, 378, 379, 380, 381, 382, 383, 43, 385, + /* 1720 */ 374, 35, 308, 43, 378, 379, 380, 381, 382, 383, + /* 1730 */ 35, 385, 0, 22, 374, 22, 308, 0, 378, 379, + /* 1740 */ 380, 381, 382, 383, 22, 385, 153, 154, 308, 156, + /* 1750 */ 336, 49, 35, 160, 0, 35, 0, 0, 344, 35, + /* 1760 */ 22, 20, 0, 349, 336, 351, 35, 157, 0, 176, + /* 1770 */ 22, 0, 344, 0, 159, 157, 336, 349, 154, 351, + /* 1780 */ 0, 0, 157, 0, 344, 89, 173, 90, 374, 349, + /* 1790 */ 35, 351, 378, 379, 380, 381, 382, 383, 0, 385, + /* 1800 */ 0, 89, 374, 155, 89, 308, 378, 379, 380, 381, + /* 1810 */ 382, 383, 153, 385, 374, 308, 182, 39, 378, 379, + /* 1820 */ 380, 381, 382, 383, 89, 385, 99, 43, 46, 89, + /* 1830 */ 43, 308, 90, 336, 89, 43, 90, 90, 89, 46, + /* 1840 */ 90, 344, 231, 336, 43, 46, 349, 89, 351, 89, + /* 1850 */ 89, 344, 43, 90, 89, 308, 349, 231, 351, 336, + /* 1860 */ 90, 46, 90, 46, 46, 90, 225, 344, 43, 90, + /* 1870 */ 35, 374, 349, 231, 351, 378, 379, 380, 381, 382, + /* 1880 */ 383, 374, 385, 336, 35, 378, 379, 380, 381, 382, + /* 1890 */ 383, 344, 385, 35, 35, 35, 349, 374, 351, 35, + /* 1900 */ 2, 378, 379, 380, 381, 382, 383, 22, 385, 193, + /* 1910 */ 43, 308, 89, 22, 90, 89, 46, 90, 89, 100, + /* 1920 */ 90, 374, 89, 89, 46, 378, 379, 380, 381, 382, + /* 1930 */ 383, 308, 385, 89, 35, 90, 35, 89, 195, 336, + /* 1940 */ 90, 35, 89, 35, 90, 89, 35, 344, 35, 113, + /* 1950 */ 89, 308, 349, 90, 351, 90, 113, 89, 22, 336, + /* 1960 */ 113, 89, 35, 101, 113, 89, 89, 344, 43, 22, + /* 1970 */ 61, 308, 349, 35, 351, 35, 62, 374, 35, 336, + /* 1980 */ 35, 378, 379, 380, 381, 382, 383, 344, 385, 35, + /* 1990 */ 35, 308, 349, 35, 351, 43, 68, 374, 35, 336, + /* 2000 */ 35, 378, 379, 380, 381, 382, 383, 344, 385, 87, + /* 2010 */ 22, 35, 349, 22, 351, 35, 35, 374, 68, 336, + /* 2020 */ 35, 378, 379, 380, 381, 382, 383, 344, 385, 35, + /* 2030 */ 35, 308, 349, 35, 351, 35, 22, 374, 35, 0, + /* 2040 */ 35, 378, 379, 380, 381, 382, 383, 308, 385, 47, + /* 2050 */ 39, 0, 35, 47, 39, 0, 35, 374, 39, 336, + /* 2060 */ 47, 378, 379, 380, 381, 382, 383, 344, 385, 0, + /* 2070 */ 35, 47, 349, 39, 351, 336, 0, 35, 35, 0, + /* 2080 */ 22, 21, 21, 344, 22, 22, 20, 426, 349, 426, + /* 2090 */ 351, 426, 426, 426, 426, 426, 426, 374, 308, 426, + /* 2100 */ 426, 378, 379, 380, 381, 382, 383, 426, 385, 426, + /* 2110 */ 426, 426, 426, 374, 308, 426, 426, 378, 379, 380, + /* 2120 */ 381, 382, 383, 426, 385, 426, 336, 426, 426, 426, + /* 2130 */ 426, 426, 426, 426, 344, 426, 426, 426, 426, 349, + /* 2140 */ 426, 351, 336, 426, 426, 426, 426, 426, 426, 426, + /* 2150 */ 344, 426, 426, 426, 426, 349, 426, 351, 426, 426, + /* 2160 */ 426, 426, 426, 426, 374, 308, 426, 426, 378, 379, + /* 2170 */ 380, 381, 382, 383, 426, 385, 426, 426, 426, 426, + /* 2180 */ 374, 308, 426, 426, 378, 379, 380, 381, 382, 383, + /* 2190 */ 426, 385, 426, 336, 426, 426, 426, 426, 426, 426, + /* 2200 */ 426, 344, 426, 426, 426, 426, 349, 426, 351, 336, + /* 2210 */ 426, 426, 426, 426, 426, 426, 426, 344, 426, 426, + /* 2220 */ 426, 426, 349, 426, 351, 426, 426, 426, 426, 426, + /* 2230 */ 426, 374, 426, 426, 426, 378, 379, 380, 381, 382, + /* 2240 */ 383, 426, 385, 426, 426, 426, 426, 374, 426, 426, + /* 2250 */ 426, 378, 379, 380, 381, 382, 383, 426, 385, 426, + /* 2260 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426, + /* 2270 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426, + /* 2280 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426, + /* 2290 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426, + /* 2300 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426, + /* 2310 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426, + /* 2320 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426, + /* 2330 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426, + /* 2340 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426, + /* 2350 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426, + /* 2360 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426, + /* 2370 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426, + /* 2380 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426, + /* 2390 */ 426, 426, }; #define YY_SHIFT_COUNT (666) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (1943) +#define YY_SHIFT_MAX (2079) static const unsigned short int yy_shift_ofst[] = { /* 0 */ 917, 0, 0, 62, 62, 264, 264, 264, 326, 326, /* 10 */ 264, 264, 391, 593, 720, 593, 593, 593, 593, 593, /* 20 */ 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, /* 30 */ 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, - /* 40 */ 593, 593, 325, 325, 361, 361, 361, 1019, 1019, 473, - /* 50 */ 1019, 1019, 389, 523, 283, 340, 283, 17, 17, 19, - /* 60 */ 19, 55, 6, 283, 283, 17, 17, 17, 17, 17, - /* 70 */ 17, 17, 17, 17, 17, 9, 17, 17, 17, 24, - /* 80 */ 17, 17, 102, 17, 17, 102, 109, 17, 102, 102, - /* 90 */ 102, 17, 135, 719, 662, 683, 683, 150, 213, 213, + /* 40 */ 593, 593, 265, 265, 17, 17, 17, 1007, 1007, 268, + /* 50 */ 1007, 1007, 160, 30, 56, 200, 56, 11, 11, 87, + /* 60 */ 87, 55, 165, 56, 56, 11, 11, 11, 11, 11, + /* 70 */ 11, 11, 11, 11, 11, 10, 11, 11, 11, 18, + /* 80 */ 11, 11, 67, 11, 11, 67, 123, 11, 67, 67, + /* 90 */ 67, 11, 227, 719, 662, 683, 683, 150, 213, 213, /* 100 */ 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, - /* 110 */ 213, 213, 213, 213, 213, 213, 213, 417, 155, 6, - /* 120 */ 630, 630, 757, 634, 909, 501, 501, 501, 634, 195, - /* 130 */ 195, 24, 292, 292, 102, 102, 255, 255, 287, 342, - /* 140 */ 198, 198, 198, 198, 198, 198, 198, 660, 21, 383, - /* 150 */ 565, 635, 5, 174, 125, 747, 873, 229, 497, 856, - /* 160 */ 939, 552, 776, 552, 740, 885, 885, 885, 892, 948, - /* 170 */ 955, 1145, 1025, 1178, 1203, 1203, 1178, 1085, 1085, 1203, - /* 180 */ 1203, 1203, 1219, 1219, 1243, 9, 24, 9, 1248, 1253, - /* 190 */ 9, 1248, 9, 9, 9, 1203, 9, 1219, 102, 102, - /* 200 */ 102, 102, 102, 102, 102, 102, 102, 102, 102, 1203, - /* 210 */ 1219, 255, 1243, 135, 1150, 24, 135, 1203, 1203, 1248, - /* 220 */ 135, 1102, 255, 255, 255, 255, 1102, 255, 1189, 135, - /* 230 */ 287, 135, 195, 1332, 255, 1134, 1102, 255, 255, 1134, - /* 240 */ 1102, 255, 255, 102, 1133, 1218, 1134, 1140, 1142, 1159, - /* 250 */ 955, 1164, 195, 1372, 1151, 1154, 1156, 1151, 1154, 1151, - /* 260 */ 1154, 1313, 1319, 255, 342, 1203, 135, 1379, 1219, 2548, - /* 270 */ 2548, 2548, 2548, 2548, 2548, 2548, 83, 1653, 214, 337, - /* 280 */ 126, 209, 492, 562, 625, 672, 535, 322, 713, 713, - /* 290 */ 713, 713, 713, 713, 713, 713, 717, 840, 405, 405, - /* 300 */ 69, 458, 197, 309, 85, 111, 8, 394, 170, 170, - /* 310 */ 170, 170, 929, 1021, 934, 966, 1002, 1020, 1026, 1119, - /* 320 */ 1121, 516, 841, 1034, 1035, 1048, 1071, 1072, 1079, 1083, - /* 330 */ 1148, 910, 994, 1010, 1088, 1008, 1050, 1040, 1097, 1066, - /* 340 */ 1110, 1115, 1117, 1124, 1125, 1127, 731, 1139, 1141, 1153, - /* 350 */ 1206, 1440, 1454, 1281, 1459, 1461, 1420, 1463, 1429, 1280, - /* 360 */ 1433, 1434, 1435, 1284, 1472, 1438, 1439, 1288, 1477, 1292, - /* 370 */ 1478, 1451, 1489, 1468, 1491, 1457, 1322, 1325, 1497, 1498, - /* 380 */ 1333, 1335, 1504, 1505, 1456, 1506, 1465, 1508, 1509, 1510, - /* 390 */ 1362, 1513, 1514, 1515, 1516, 1517, 1374, 1484, 1520, 1377, - /* 400 */ 1528, 1529, 1530, 1537, 1541, 1542, 1543, 1544, 1545, 1546, - /* 410 */ 1547, 1549, 1551, 1552, 1481, 1524, 1525, 1526, 1554, 1556, - /* 420 */ 1557, 1538, 1559, 1561, 1562, 1564, 1566, 1512, 1569, 1518, - /* 430 */ 1570, 1571, 1531, 1533, 1534, 1567, 1532, 1578, 1555, 1576, - /* 440 */ 1563, 1565, 1602, 1605, 1607, 1579, 1430, 1608, 1609, 1620, - /* 450 */ 1560, 1623, 1624, 1590, 1582, 1587, 1628, 1595, 1584, 1594, - /* 460 */ 1634, 1601, 1592, 1598, 1641, 1610, 1596, 1611, 1646, 1647, - /* 470 */ 1648, 1651, 1558, 1568, 1618, 1632, 1655, 1625, 1615, 1619, - /* 480 */ 1626, 1630, 1645, 1663, 1654, 1668, 1656, 1631, 1669, 1657, - /* 490 */ 1650, 1681, 1658, 1682, 1661, 1695, 1675, 1678, 1699, 1553, - /* 500 */ 1666, 1703, 1540, 1689, 1572, 1550, 1714, 1715, 1574, 1573, - /* 510 */ 1716, 1717, 1718, 1633, 1629, 1686, 1583, 1723, 1635, 1580, - /* 520 */ 1637, 1728, 1694, 1586, 1659, 1638, 1688, 1693, 1511, 1664, - /* 530 */ 1662, 1665, 1673, 1674, 1677, 1697, 1680, 1679, 1683, 1690, - /* 540 */ 1691, 1704, 1700, 1721, 1696, 1706, 1519, 1692, 1698, 1725, - /* 550 */ 1575, 1740, 1738, 1741, 1701, 1743, 1577, 1702, 1755, 1758, - /* 560 */ 1759, 1760, 1761, 1763, 1702, 1797, 1779, 1612, 1768, 1727, - /* 570 */ 1713, 1729, 1722, 1730, 1724, 1769, 1731, 1732, 1771, 1800, - /* 580 */ 1649, 1734, 1742, 1735, 1789, 1791, 1751, 1737, 1795, 1752, - /* 590 */ 1753, 1810, 1765, 1757, 1813, 1766, 1762, 1814, 1767, 1744, - /* 600 */ 1745, 1747, 1748, 1828, 1764, 1773, 1774, 1831, 1780, 1821, - /* 610 */ 1821, 1846, 1808, 1812, 1836, 1840, 1841, 1843, 1844, 1845, - /* 620 */ 1852, 1822, 1804, 1849, 1858, 1859, 1873, 1861, 1875, 1865, - /* 630 */ 1866, 1842, 1615, 1876, 1619, 1877, 1878, 1879, 1880, 1886, - /* 640 */ 1881, 1918, 1884, 1874, 1883, 1920, 1888, 1882, 1885, 1926, - /* 650 */ 1893, 1887, 1892, 1933, 1901, 1891, 1898, 1939, 1906, 1909, - /* 660 */ 1943, 1923, 1925, 1927, 1928, 1931, 1934, + /* 110 */ 213, 213, 213, 213, 213, 213, 213, 517, 881, 165, + /* 120 */ 403, 403, 573, 386, 849, 361, 361, 361, 386, 298, + /* 130 */ 298, 18, 350, 350, 67, 67, 294, 294, 270, 357, + /* 140 */ 198, 198, 198, 198, 198, 198, 198, 1119, 21, 383, + /* 150 */ 501, 105, 665, 484, 715, 828, 366, 799, 506, 800, + /* 160 */ 717, 649, 717, 924, 756, 756, 756, 807, 873, 980, + /* 170 */ 1174, 1078, 1206, 1232, 1232, 1206, 1107, 1107, 1232, 1232, + /* 180 */ 1232, 1257, 1257, 1264, 10, 18, 10, 1272, 1277, 10, + /* 190 */ 1272, 10, 10, 10, 1232, 10, 1257, 67, 67, 67, + /* 200 */ 67, 67, 67, 67, 67, 67, 67, 67, 1232, 1257, + /* 210 */ 294, 1264, 227, 1166, 18, 227, 1232, 1232, 1272, 227, + /* 220 */ 1118, 294, 294, 294, 294, 1118, 294, 1201, 227, 270, + /* 230 */ 227, 298, 1345, 294, 1139, 1118, 294, 294, 1139, 1118, + /* 240 */ 294, 294, 67, 1141, 1235, 1139, 1154, 1157, 1170, 980, + /* 250 */ 1178, 298, 1382, 1159, 1162, 1167, 1159, 1162, 1159, 1162, + /* 260 */ 1320, 1326, 294, 357, 1232, 227, 1383, 1257, 2259, 2259, + /* 270 */ 2259, 2259, 2259, 2259, 2259, 83, 1593, 214, 724, 126, + /* 280 */ 209, 491, 562, 622, 813, 535, 321, 698, 698, 698, + /* 290 */ 698, 698, 698, 698, 698, 521, 309, 13, 13, 115, + /* 300 */ 69, 599, 267, 708, 194, 377, 190, 465, 49, 49, + /* 310 */ 49, 49, 684, 944, 938, 994, 999, 1001, 947, 1047, + /* 320 */ 1101, 941, 920, 1025, 1043, 1069, 1079, 1091, 1093, 1102, + /* 330 */ 1160, 1073, 973, 895, 1103, 1075, 1082, 1058, 1106, 1114, + /* 340 */ 1115, 1133, 1163, 1165, 1193, 1198, 1064, 860, 1143, 1176, + /* 350 */ 839, 1473, 1476, 1301, 1479, 1480, 1439, 1482, 1448, 1297, + /* 360 */ 1450, 1451, 1452, 1302, 1490, 1456, 1457, 1307, 1495, 1310, + /* 370 */ 1502, 1477, 1513, 1493, 1516, 1483, 1348, 1347, 1521, 1527, + /* 380 */ 1357, 1359, 1525, 1528, 1494, 1529, 1499, 1539, 1542, 1543, + /* 390 */ 1395, 1545, 1552, 1554, 1555, 1556, 1403, 1514, 1548, 1413, + /* 400 */ 1559, 1560, 1561, 1562, 1566, 1567, 1568, 1569, 1570, 1571, + /* 410 */ 1573, 1574, 1575, 1578, 1544, 1590, 1591, 1598, 1600, 1601, + /* 420 */ 1612, 1581, 1604, 1605, 1614, 1616, 1619, 1564, 1621, 1572, + /* 430 */ 1627, 1629, 1582, 1584, 1588, 1620, 1587, 1622, 1589, 1638, + /* 440 */ 1599, 1602, 1650, 1651, 1652, 1617, 1492, 1657, 1658, 1665, + /* 450 */ 1607, 1667, 1671, 1608, 1597, 1633, 1673, 1639, 1630, 1637, + /* 460 */ 1679, 1645, 1640, 1647, 1688, 1654, 1644, 1653, 1693, 1694, + /* 470 */ 1698, 1699, 1609, 1606, 1666, 1678, 1703, 1670, 1675, 1680, + /* 480 */ 1686, 1695, 1684, 1710, 1711, 1732, 1713, 1702, 1737, 1722, + /* 490 */ 1717, 1754, 1720, 1756, 1724, 1757, 1738, 1741, 1762, 1610, + /* 500 */ 1731, 1768, 1613, 1748, 1618, 1624, 1771, 1773, 1625, 1615, + /* 510 */ 1780, 1781, 1783, 1696, 1697, 1755, 1634, 1798, 1712, 1648, + /* 520 */ 1715, 1800, 1778, 1659, 1735, 1727, 1782, 1784, 1611, 1740, + /* 530 */ 1742, 1745, 1746, 1747, 1749, 1787, 1750, 1758, 1760, 1761, + /* 540 */ 1763, 1792, 1793, 1799, 1765, 1801, 1626, 1770, 1772, 1815, + /* 550 */ 1641, 1809, 1817, 1818, 1775, 1825, 1642, 1779, 1835, 1849, + /* 560 */ 1858, 1859, 1860, 1864, 1779, 1898, 1885, 1716, 1867, 1823, + /* 570 */ 1824, 1826, 1827, 1829, 1830, 1870, 1833, 1834, 1878, 1891, + /* 580 */ 1743, 1844, 1819, 1845, 1899, 1901, 1848, 1850, 1906, 1853, + /* 590 */ 1854, 1908, 1856, 1863, 1911, 1861, 1865, 1913, 1868, 1836, + /* 600 */ 1843, 1847, 1851, 1936, 1862, 1872, 1876, 1927, 1877, 1925, + /* 610 */ 1925, 1947, 1914, 1909, 1938, 1940, 1943, 1945, 1954, 1955, + /* 620 */ 1958, 1928, 1922, 1952, 1963, 1965, 1988, 1976, 1991, 1980, + /* 630 */ 1981, 1950, 1675, 1985, 1680, 1994, 1995, 1998, 2000, 2014, + /* 640 */ 2003, 2039, 2005, 2002, 2011, 2051, 2017, 2006, 2015, 2055, + /* 650 */ 2021, 2013, 2019, 2069, 2035, 2024, 2034, 2076, 2042, 2043, + /* 660 */ 2079, 2058, 2060, 2062, 2063, 2061, 2066, }; -#define YY_REDUCE_COUNT (275) -#define YY_REDUCE_MIN (-389) -#define YY_REDUCE_MAX (2125) +#define YY_REDUCE_COUNT (274) +#define YY_REDUCE_MIN (-403) +#define YY_REDUCE_MAX (1873) static const short yy_reduce_ofst[] = { - /* 0 */ -75, 602, 629, -279, -15, 753, 815, 867, 923, 933, - /* 10 */ 273, 982, 104, 1042, 1052, 1101, 1152, 1204, 1214, 1231, - /* 20 */ 1291, 1308, 1376, 1394, 1453, 1502, 1523, 1581, 1591, 1643, - /* 30 */ 1660, 1709, 1726, 1775, 1829, 1839, 1890, 1908, 1967, 1988, - /* 40 */ 2044, 2054, 2078, 2125, -312, 33, 425, -295, 436, 490, - /* 50 */ -308, 654, -345, -68, 77, 151, 246, -316, -310, -307, - /* 60 */ -276, -285, -238, -87, -78, -313, -33, 239, 367, 418, - /* 70 */ 420, 427, 505, 623, 689, 161, 698, 722, 723, -234, - /* 80 */ 729, 730, 727, 742, 744, -244, -153, 749, 754, -10, - /* 90 */ 782, 767, 91, -124, -389, -389, -389, -164, -51, -30, - /* 100 */ -21, 100, 148, 205, 272, 311, 323, 357, 381, 382, - /* 110 */ 430, 464, 540, 603, 607, 640, 641, 28, -263, -335, - /* 120 */ 392, 443, 438, -229, -85, 234, 467, 570, 79, 18, - /* 130 */ 365, 312, 341, 413, 223, 665, 545, 596, 668, 610, - /* 140 */ 90, 220, 249, 308, 373, 387, 398, 252, 534, 518, - /* 150 */ 645, 571, 677, 693, 680, 781, 781, 839, 848, 817, - /* 160 */ 793, 769, 769, 769, 783, 758, 762, 763, 777, 781, - /* 170 */ 812, 814, 836, 876, 918, 919, 880, 884, 889, 928, - /* 180 */ 936, 941, 935, 945, 894, 946, 914, 950, 912, 911, - /* 190 */ 958, 916, 960, 961, 963, 969, 968, 976, 953, 956, - /* 200 */ 957, 959, 964, 965, 974, 975, 984, 985, 986, 978, - /* 210 */ 981, 947, 954, 1005, 930, 979, 1009, 1013, 1016, 971, - /* 220 */ 1017, 980, 988, 991, 993, 995, 987, 998, 992, 1030, - /* 230 */ 1022, 1038, 1014, 989, 1004, 962, 1000, 1023, 1028, 970, - /* 240 */ 1011, 1031, 1033, 781, 973, 972, 983, 990, 996, 999, - /* 250 */ 1024, 769, 1051, 1027, 997, 1029, 1003, 1018, 1036, 1032, - /* 260 */ 1037, 1055, 1070, 1062, 1086, 1098, 1095, 1105, 1109, 1053, - /* 270 */ 1068, 1113, 1114, 1118, 1132, 1149, + /* 0 */ 263, 629, 747, -278, -14, 679, 846, 886, 955, 1009, + /* 10 */ 272, 1059, 104, 1076, 1126, 806, 1152, 1202, 1214, 1228, + /* 20 */ 1281, 1334, 1346, 1360, 1414, 1428, 1440, 1497, 1507, 1523, + /* 30 */ 1547, 1603, 1623, 1643, 1663, 1683, 1723, 1739, 1790, 1806, + /* 40 */ 1857, 1873, 304, 926, 112, 435, 552, -295, 610, -3, + /* 50 */ -261, -154, -323, 305, 575, 577, 635, -310, -272, -312, + /* 60 */ -307, -403, -311, -284, -70, -90, 225, 346, 404, 414, + /* 70 */ 429, 434, 497, 644, 645, 339, 674, 697, 710, -342, + /* 80 */ 722, 749, -313, 758, 772, -244, -251, 797, -17, 20, + /* 90 */ 48, 804, 228, 86, -379, -379, -379, -240, -13, 100, + /* 100 */ 101, 130, 206, 375, 385, 428, 459, 461, 474, 522, + /* 110 */ 566, 628, 656, 678, 694, 696, 740, -226, -92, -72, + /* 120 */ -256, -130, 173, 4, 34, 186, 362, 440, 97, 91, + /* 130 */ 303, -234, -109, 180, 274, 536, 317, 545, 579, 234, + /* 140 */ -333, 40, 144, 175, 242, 319, 410, 450, 445, 348, + /* 150 */ 369, 495, 572, 557, 620, 620, 734, 667, 688, 727, + /* 160 */ 735, 735, 735, 770, 752, 784, 786, 762, 620, 861, + /* 170 */ 874, 866, 894, 940, 942, 901, 907, 908, 956, 957, + /* 180 */ 960, 971, 972, 915, 966, 936, 974, 933, 935, 978, + /* 190 */ 939, 981, 983, 985, 991, 989, 1000, 975, 976, 977, + /* 200 */ 979, 982, 984, 986, 990, 992, 995, 996, 998, 1006, + /* 210 */ 993, 951, 1021, 987, 997, 1026, 1030, 1033, 988, 1032, + /* 220 */ 1002, 1005, 1008, 1013, 1014, 1010, 1015, 1011, 1048, 1038, + /* 230 */ 1053, 1031, 1003, 1028, 968, 1020, 1036, 1037, 1004, 1022, + /* 240 */ 1044, 1049, 620, 1016, 1012, 1018, 1023, 1027, 1024, 1029, + /* 250 */ 735, 1061, 1035, 1045, 1017, 1039, 1046, 1034, 1051, 1050, + /* 260 */ 1052, 1081, 1067, 1092, 1129, 1104, 1112, 1136, 1096, 1086, + /* 270 */ 1137, 1138, 1142, 1130, 1168, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 10 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 20 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 30 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 40 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 50 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 60 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 70 */ 1464, 1464, 1464, 1464, 1464, 1538, 1464, 1464, 1464, 1464, - /* 80 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 90 */ 1464, 1464, 1536, 1694, 1464, 1871, 1464, 1464, 1464, 1464, - /* 100 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 110 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 120 */ 1464, 1464, 1538, 1464, 1536, 1883, 1883, 1883, 1464, 1464, - /* 130 */ 1464, 1464, 1737, 1737, 1464, 1464, 1464, 1464, 1636, 1464, - /* 140 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1729, 1464, 1952, - /* 150 */ 1464, 1464, 1464, 1735, 1906, 1464, 1464, 1464, 1464, 1589, - /* 160 */ 1898, 1875, 1889, 1876, 1873, 1937, 1937, 1937, 1892, 1464, - /* 170 */ 1902, 1464, 1722, 1699, 1464, 1464, 1699, 1696, 1696, 1464, - /* 180 */ 1464, 1464, 1464, 1464, 1464, 1538, 1464, 1538, 1464, 1464, - /* 190 */ 1538, 1464, 1538, 1538, 1538, 1464, 1538, 1464, 1464, 1464, - /* 200 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 210 */ 1464, 1464, 1464, 1536, 1731, 1464, 1536, 1464, 1464, 1464, - /* 220 */ 1536, 1911, 1464, 1464, 1464, 1464, 1911, 1464, 1464, 1536, - /* 230 */ 1464, 1536, 1464, 1464, 1464, 1913, 1911, 1464, 1464, 1913, - /* 240 */ 1911, 1464, 1464, 1464, 1925, 1921, 1913, 1929, 1927, 1904, - /* 250 */ 1902, 1889, 1464, 1464, 1943, 1939, 1955, 1943, 1939, 1943, - /* 260 */ 1939, 1464, 1605, 1464, 1464, 1464, 1536, 1496, 1464, 1724, - /* 270 */ 1737, 1639, 1639, 1639, 1539, 1469, 1464, 1464, 1464, 1464, - /* 280 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1808, 1924, - /* 290 */ 1923, 1847, 1846, 1845, 1843, 1807, 1464, 1601, 1806, 1805, - /* 300 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1799, 1800, - /* 310 */ 1798, 1797, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 320 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 330 */ 1872, 1464, 1940, 1944, 1464, 1464, 1464, 1464, 1464, 1783, - /* 340 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 350 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 360 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 370 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 380 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 390 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 400 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 410 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 420 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 430 */ 1464, 1464, 1464, 1464, 1501, 1464, 1464, 1464, 1464, 1464, - /* 440 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 450 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 460 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 470 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1573, 1572, - /* 480 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 490 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 500 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 510 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1741, 1464, 1464, - /* 520 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1905, 1464, 1464, - /* 530 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 540 */ 1464, 1464, 1464, 1783, 1464, 1922, 1464, 1882, 1878, 1464, - /* 550 */ 1464, 1874, 1782, 1464, 1464, 1938, 1464, 1464, 1464, 1464, - /* 560 */ 1464, 1464, 1464, 1464, 1464, 1867, 1464, 1464, 1840, 1825, - /* 570 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 580 */ 1793, 1464, 1464, 1464, 1464, 1464, 1633, 1464, 1464, 1464, - /* 590 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1618, - /* 600 */ 1616, 1615, 1614, 1464, 1611, 1464, 1464, 1464, 1464, 1642, - /* 610 */ 1641, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 620 */ 1464, 1464, 1464, 1557, 1464, 1464, 1464, 1464, 1464, 1464, - /* 630 */ 1464, 1464, 1549, 1464, 1548, 1464, 1464, 1464, 1464, 1464, - /* 640 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 650 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, - /* 660 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, + /* 0 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 10 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 20 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 30 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 40 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 50 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 60 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 70 */ 1461, 1461, 1461, 1461, 1461, 1535, 1461, 1461, 1461, 1461, + /* 80 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 90 */ 1461, 1461, 1533, 1691, 1461, 1866, 1461, 1461, 1461, 1461, + /* 100 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 110 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 120 */ 1461, 1461, 1535, 1461, 1533, 1878, 1878, 1878, 1461, 1461, + /* 130 */ 1461, 1461, 1732, 1732, 1461, 1461, 1461, 1461, 1633, 1461, + /* 140 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1726, 1461, 1947, + /* 150 */ 1461, 1461, 1461, 1901, 1461, 1461, 1461, 1461, 1586, 1893, + /* 160 */ 1870, 1884, 1871, 1868, 1932, 1932, 1932, 1887, 1461, 1897, + /* 170 */ 1461, 1719, 1696, 1461, 1461, 1696, 1693, 1693, 1461, 1461, + /* 180 */ 1461, 1461, 1461, 1461, 1535, 1461, 1535, 1461, 1461, 1535, + /* 190 */ 1461, 1535, 1535, 1535, 1461, 1535, 1461, 1461, 1461, 1461, + /* 200 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 210 */ 1461, 1461, 1533, 1728, 1461, 1533, 1461, 1461, 1461, 1533, + /* 220 */ 1906, 1461, 1461, 1461, 1461, 1906, 1461, 1461, 1533, 1461, + /* 230 */ 1533, 1461, 1461, 1461, 1908, 1906, 1461, 1461, 1908, 1906, + /* 240 */ 1461, 1461, 1461, 1920, 1916, 1908, 1924, 1922, 1899, 1897, + /* 250 */ 1884, 1461, 1461, 1938, 1934, 1950, 1938, 1934, 1938, 1934, + /* 260 */ 1461, 1602, 1461, 1461, 1461, 1533, 1493, 1461, 1721, 1732, + /* 270 */ 1636, 1636, 1636, 1536, 1466, 1461, 1461, 1461, 1461, 1461, + /* 280 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1803, 1919, 1918, + /* 290 */ 1842, 1841, 1840, 1838, 1802, 1461, 1598, 1801, 1800, 1461, + /* 300 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1794, 1795, + /* 310 */ 1793, 1792, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 320 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 330 */ 1867, 1461, 1935, 1939, 1461, 1461, 1461, 1461, 1461, 1778, + /* 340 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 350 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 360 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 370 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 380 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 390 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 400 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 410 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 420 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 430 */ 1461, 1461, 1461, 1461, 1498, 1461, 1461, 1461, 1461, 1461, + /* 440 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 450 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 460 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 470 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1570, 1569, + /* 480 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 490 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 500 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 510 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1736, 1461, 1461, + /* 520 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1900, 1461, 1461, + /* 530 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 540 */ 1461, 1461, 1461, 1778, 1461, 1917, 1461, 1877, 1873, 1461, + /* 550 */ 1461, 1869, 1777, 1461, 1461, 1933, 1461, 1461, 1461, 1461, + /* 560 */ 1461, 1461, 1461, 1461, 1461, 1862, 1461, 1461, 1835, 1820, + /* 570 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 580 */ 1788, 1461, 1461, 1461, 1461, 1461, 1630, 1461, 1461, 1461, + /* 590 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1615, + /* 600 */ 1613, 1612, 1611, 1461, 1608, 1461, 1461, 1461, 1461, 1639, + /* 610 */ 1638, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 620 */ 1461, 1461, 1461, 1554, 1461, 1461, 1461, 1461, 1461, 1461, + /* 630 */ 1461, 1461, 1546, 1461, 1545, 1461, 1461, 1461, 1461, 1461, + /* 640 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 650 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, + /* 660 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, }; /********** End of lemon-generated parsing tables *****************************/ @@ -1686,62 +1642,61 @@ static const char *const yyTokenName[] = { /* 368 */ "agg_func_opt", /* 369 */ "bufsize_opt", /* 370 */ "stream_name", - /* 371 */ "into_opt", - /* 372 */ "dnode_list", - /* 373 */ "where_clause_opt", - /* 374 */ "signed", - /* 375 */ "literal_func", - /* 376 */ "literal_list", - /* 377 */ "table_alias", - /* 378 */ "column_alias", - /* 379 */ "expression", - /* 380 */ "pseudo_column", - /* 381 */ "column_reference", - /* 382 */ "function_expression", - /* 383 */ "subquery", - /* 384 */ "star_func", - /* 385 */ "star_func_para_list", - /* 386 */ "noarg_func", - /* 387 */ "other_para_list", - /* 388 */ "star_func_para", - /* 389 */ "predicate", - /* 390 */ "compare_op", - /* 391 */ "in_op", - /* 392 */ "in_predicate_value", - /* 393 */ "boolean_value_expression", - /* 394 */ "boolean_primary", - /* 395 */ "common_expression", - /* 396 */ "from_clause_opt", - /* 397 */ "table_reference_list", - /* 398 */ "table_reference", - /* 399 */ "table_primary", - /* 400 */ "joined_table", - /* 401 */ "alias_opt", - /* 402 */ "parenthesized_joined_table", - /* 403 */ "join_type", - /* 404 */ "search_condition", - /* 405 */ "query_specification", - /* 406 */ "set_quantifier_opt", - /* 407 */ "select_list", - /* 408 */ "partition_by_clause_opt", - /* 409 */ "range_opt", - /* 410 */ "every_opt", - /* 411 */ "fill_opt", - /* 412 */ "twindow_clause_opt", - /* 413 */ "group_by_clause_opt", - /* 414 */ "having_clause_opt", - /* 415 */ "select_item", - /* 416 */ "fill_mode", - /* 417 */ "group_by_list", - /* 418 */ "query_expression_body", - /* 419 */ "order_by_clause_opt", - /* 420 */ "slimit_clause_opt", - /* 421 */ "limit_clause_opt", - /* 422 */ "query_primary", - /* 423 */ "sort_specification_list", - /* 424 */ "sort_specification", - /* 425 */ "ordering_specification_opt", - /* 426 */ "null_ordering_opt", + /* 371 */ "dnode_list", + /* 372 */ "where_clause_opt", + /* 373 */ "signed", + /* 374 */ "literal_func", + /* 375 */ "literal_list", + /* 376 */ "table_alias", + /* 377 */ "column_alias", + /* 378 */ "expression", + /* 379 */ "pseudo_column", + /* 380 */ "column_reference", + /* 381 */ "function_expression", + /* 382 */ "subquery", + /* 383 */ "star_func", + /* 384 */ "star_func_para_list", + /* 385 */ "noarg_func", + /* 386 */ "other_para_list", + /* 387 */ "star_func_para", + /* 388 */ "predicate", + /* 389 */ "compare_op", + /* 390 */ "in_op", + /* 391 */ "in_predicate_value", + /* 392 */ "boolean_value_expression", + /* 393 */ "boolean_primary", + /* 394 */ "common_expression", + /* 395 */ "from_clause_opt", + /* 396 */ "table_reference_list", + /* 397 */ "table_reference", + /* 398 */ "table_primary", + /* 399 */ "joined_table", + /* 400 */ "alias_opt", + /* 401 */ "parenthesized_joined_table", + /* 402 */ "join_type", + /* 403 */ "search_condition", + /* 404 */ "query_specification", + /* 405 */ "set_quantifier_opt", + /* 406 */ "select_list", + /* 407 */ "partition_by_clause_opt", + /* 408 */ "range_opt", + /* 409 */ "every_opt", + /* 410 */ "fill_opt", + /* 411 */ "twindow_clause_opt", + /* 412 */ "group_by_clause_opt", + /* 413 */ "having_clause_opt", + /* 414 */ "select_item", + /* 415 */ "fill_mode", + /* 416 */ "group_by_list", + /* 417 */ "query_expression_body", + /* 418 */ "order_by_clause_opt", + /* 419 */ "slimit_clause_opt", + /* 420 */ "limit_clause_opt", + /* 421 */ "query_primary", + /* 422 */ "sort_specification_list", + /* 423 */ "sort_specification", + /* 424 */ "ordering_specification_opt", + /* 425 */ "null_ordering_opt", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -2015,231 +1970,229 @@ static const char *const yyRuleName[] = { /* 263 */ "agg_func_opt ::= AGGREGATE", /* 264 */ "bufsize_opt ::=", /* 265 */ "bufsize_opt ::= BUFSIZE NK_INTEGER", - /* 266 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression", + /* 266 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name AS query_expression", /* 267 */ "cmd ::= DROP STREAM exists_opt stream_name", - /* 268 */ "into_opt ::=", - /* 269 */ "into_opt ::= INTO full_table_name", - /* 270 */ "stream_options ::=", - /* 271 */ "stream_options ::= stream_options TRIGGER AT_ONCE", - /* 272 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE", - /* 273 */ "stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal", - /* 274 */ "stream_options ::= stream_options WATERMARK duration_literal", - /* 275 */ "stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER", - /* 276 */ "cmd ::= KILL CONNECTION NK_INTEGER", - /* 277 */ "cmd ::= KILL QUERY NK_STRING", - /* 278 */ "cmd ::= KILL TRANSACTION NK_INTEGER", - /* 279 */ "cmd ::= BALANCE VGROUP", - /* 280 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER", - /* 281 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list", - /* 282 */ "cmd ::= SPLIT VGROUP NK_INTEGER", - /* 283 */ "dnode_list ::= DNODE NK_INTEGER", - /* 284 */ "dnode_list ::= dnode_list DNODE NK_INTEGER", - /* 285 */ "cmd ::= DELETE FROM full_table_name where_clause_opt", - /* 286 */ "cmd ::= query_expression", - /* 287 */ "cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression", - /* 288 */ "cmd ::= INSERT INTO full_table_name query_expression", - /* 289 */ "literal ::= NK_INTEGER", - /* 290 */ "literal ::= NK_FLOAT", - /* 291 */ "literal ::= NK_STRING", - /* 292 */ "literal ::= NK_BOOL", - /* 293 */ "literal ::= TIMESTAMP NK_STRING", - /* 294 */ "literal ::= duration_literal", - /* 295 */ "literal ::= NULL", - /* 296 */ "literal ::= NK_QUESTION", - /* 297 */ "duration_literal ::= NK_VARIABLE", - /* 298 */ "signed ::= NK_INTEGER", - /* 299 */ "signed ::= NK_PLUS NK_INTEGER", - /* 300 */ "signed ::= NK_MINUS NK_INTEGER", - /* 301 */ "signed ::= NK_FLOAT", - /* 302 */ "signed ::= NK_PLUS NK_FLOAT", - /* 303 */ "signed ::= NK_MINUS NK_FLOAT", - /* 304 */ "signed_literal ::= signed", - /* 305 */ "signed_literal ::= NK_STRING", - /* 306 */ "signed_literal ::= NK_BOOL", - /* 307 */ "signed_literal ::= TIMESTAMP NK_STRING", - /* 308 */ "signed_literal ::= duration_literal", - /* 309 */ "signed_literal ::= NULL", - /* 310 */ "signed_literal ::= literal_func", - /* 311 */ "signed_literal ::= NK_QUESTION", - /* 312 */ "literal_list ::= signed_literal", - /* 313 */ "literal_list ::= literal_list NK_COMMA signed_literal", - /* 314 */ "db_name ::= NK_ID", - /* 315 */ "table_name ::= NK_ID", - /* 316 */ "column_name ::= NK_ID", - /* 317 */ "function_name ::= NK_ID", - /* 318 */ "table_alias ::= NK_ID", - /* 319 */ "column_alias ::= NK_ID", - /* 320 */ "user_name ::= NK_ID", - /* 321 */ "topic_name ::= NK_ID", - /* 322 */ "stream_name ::= NK_ID", - /* 323 */ "cgroup_name ::= NK_ID", - /* 324 */ "expression ::= literal", - /* 325 */ "expression ::= pseudo_column", - /* 326 */ "expression ::= column_reference", - /* 327 */ "expression ::= function_expression", - /* 328 */ "expression ::= subquery", - /* 329 */ "expression ::= NK_LP expression NK_RP", - /* 330 */ "expression ::= NK_PLUS expression", - /* 331 */ "expression ::= NK_MINUS expression", - /* 332 */ "expression ::= expression NK_PLUS expression", - /* 333 */ "expression ::= expression NK_MINUS expression", - /* 334 */ "expression ::= expression NK_STAR expression", - /* 335 */ "expression ::= expression NK_SLASH expression", - /* 336 */ "expression ::= expression NK_REM expression", - /* 337 */ "expression ::= column_reference NK_ARROW NK_STRING", - /* 338 */ "expression ::= expression NK_BITAND expression", - /* 339 */ "expression ::= expression NK_BITOR expression", - /* 340 */ "expression_list ::= expression", - /* 341 */ "expression_list ::= expression_list NK_COMMA expression", - /* 342 */ "column_reference ::= column_name", - /* 343 */ "column_reference ::= table_name NK_DOT column_name", - /* 344 */ "pseudo_column ::= ROWTS", - /* 345 */ "pseudo_column ::= TBNAME", - /* 346 */ "pseudo_column ::= table_name NK_DOT TBNAME", - /* 347 */ "pseudo_column ::= QSTART", - /* 348 */ "pseudo_column ::= QEND", - /* 349 */ "pseudo_column ::= QDURATION", - /* 350 */ "pseudo_column ::= WSTART", - /* 351 */ "pseudo_column ::= WEND", - /* 352 */ "pseudo_column ::= WDURATION", - /* 353 */ "function_expression ::= function_name NK_LP expression_list NK_RP", - /* 354 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP", - /* 355 */ "function_expression ::= CAST NK_LP expression AS type_name NK_RP", - /* 356 */ "function_expression ::= literal_func", - /* 357 */ "literal_func ::= noarg_func NK_LP NK_RP", - /* 358 */ "literal_func ::= NOW", - /* 359 */ "noarg_func ::= NOW", - /* 360 */ "noarg_func ::= TODAY", - /* 361 */ "noarg_func ::= TIMEZONE", - /* 362 */ "noarg_func ::= DATABASE", - /* 363 */ "noarg_func ::= CLIENT_VERSION", - /* 364 */ "noarg_func ::= SERVER_VERSION", - /* 365 */ "noarg_func ::= SERVER_STATUS", - /* 366 */ "noarg_func ::= CURRENT_USER", - /* 367 */ "noarg_func ::= USER", - /* 368 */ "star_func ::= COUNT", - /* 369 */ "star_func ::= FIRST", - /* 370 */ "star_func ::= LAST", - /* 371 */ "star_func ::= LAST_ROW", - /* 372 */ "star_func_para_list ::= NK_STAR", - /* 373 */ "star_func_para_list ::= other_para_list", - /* 374 */ "other_para_list ::= star_func_para", - /* 375 */ "other_para_list ::= other_para_list NK_COMMA star_func_para", - /* 376 */ "star_func_para ::= expression", - /* 377 */ "star_func_para ::= table_name NK_DOT NK_STAR", - /* 378 */ "predicate ::= expression compare_op expression", - /* 379 */ "predicate ::= expression BETWEEN expression AND expression", - /* 380 */ "predicate ::= expression NOT BETWEEN expression AND expression", - /* 381 */ "predicate ::= expression IS NULL", - /* 382 */ "predicate ::= expression IS NOT NULL", - /* 383 */ "predicate ::= expression in_op in_predicate_value", - /* 384 */ "compare_op ::= NK_LT", - /* 385 */ "compare_op ::= NK_GT", - /* 386 */ "compare_op ::= NK_LE", - /* 387 */ "compare_op ::= NK_GE", - /* 388 */ "compare_op ::= NK_NE", - /* 389 */ "compare_op ::= NK_EQ", - /* 390 */ "compare_op ::= LIKE", - /* 391 */ "compare_op ::= NOT LIKE", - /* 392 */ "compare_op ::= MATCH", - /* 393 */ "compare_op ::= NMATCH", - /* 394 */ "compare_op ::= CONTAINS", - /* 395 */ "in_op ::= IN", - /* 396 */ "in_op ::= NOT IN", - /* 397 */ "in_predicate_value ::= NK_LP literal_list NK_RP", - /* 398 */ "boolean_value_expression ::= boolean_primary", - /* 399 */ "boolean_value_expression ::= NOT boolean_primary", - /* 400 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression", - /* 401 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression", - /* 402 */ "boolean_primary ::= predicate", - /* 403 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP", - /* 404 */ "common_expression ::= expression", - /* 405 */ "common_expression ::= boolean_value_expression", - /* 406 */ "from_clause_opt ::=", - /* 407 */ "from_clause_opt ::= FROM table_reference_list", - /* 408 */ "table_reference_list ::= table_reference", - /* 409 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference", - /* 410 */ "table_reference ::= table_primary", - /* 411 */ "table_reference ::= joined_table", - /* 412 */ "table_primary ::= table_name alias_opt", - /* 413 */ "table_primary ::= db_name NK_DOT table_name alias_opt", - /* 414 */ "table_primary ::= subquery alias_opt", - /* 415 */ "table_primary ::= parenthesized_joined_table", - /* 416 */ "alias_opt ::=", - /* 417 */ "alias_opt ::= table_alias", - /* 418 */ "alias_opt ::= AS table_alias", - /* 419 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP", - /* 420 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP", - /* 421 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition", - /* 422 */ "join_type ::=", - /* 423 */ "join_type ::= INNER", - /* 424 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt", - /* 425 */ "set_quantifier_opt ::=", - /* 426 */ "set_quantifier_opt ::= DISTINCT", - /* 427 */ "set_quantifier_opt ::= ALL", - /* 428 */ "select_list ::= select_item", - /* 429 */ "select_list ::= select_list NK_COMMA select_item", - /* 430 */ "select_item ::= NK_STAR", - /* 431 */ "select_item ::= common_expression", - /* 432 */ "select_item ::= common_expression column_alias", - /* 433 */ "select_item ::= common_expression AS column_alias", - /* 434 */ "select_item ::= table_name NK_DOT NK_STAR", - /* 435 */ "where_clause_opt ::=", - /* 436 */ "where_clause_opt ::= WHERE search_condition", - /* 437 */ "partition_by_clause_opt ::=", - /* 438 */ "partition_by_clause_opt ::= PARTITION BY expression_list", - /* 439 */ "twindow_clause_opt ::=", - /* 440 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP", - /* 441 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP", - /* 442 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt", - /* 443 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt", - /* 444 */ "sliding_opt ::=", - /* 445 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP", - /* 446 */ "fill_opt ::=", - /* 447 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP", - /* 448 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP", - /* 449 */ "fill_mode ::= NONE", - /* 450 */ "fill_mode ::= PREV", - /* 451 */ "fill_mode ::= NULL", - /* 452 */ "fill_mode ::= LINEAR", - /* 453 */ "fill_mode ::= NEXT", - /* 454 */ "group_by_clause_opt ::=", - /* 455 */ "group_by_clause_opt ::= GROUP BY group_by_list", - /* 456 */ "group_by_list ::= expression", - /* 457 */ "group_by_list ::= group_by_list NK_COMMA expression", - /* 458 */ "having_clause_opt ::=", - /* 459 */ "having_clause_opt ::= HAVING search_condition", - /* 460 */ "range_opt ::=", - /* 461 */ "range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP", - /* 462 */ "every_opt ::=", - /* 463 */ "every_opt ::= EVERY NK_LP duration_literal NK_RP", - /* 464 */ "query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt", - /* 465 */ "query_expression_body ::= query_primary", - /* 466 */ "query_expression_body ::= query_expression_body UNION ALL query_expression_body", - /* 467 */ "query_expression_body ::= query_expression_body UNION query_expression_body", - /* 468 */ "query_primary ::= query_specification", - /* 469 */ "query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP", - /* 470 */ "order_by_clause_opt ::=", - /* 471 */ "order_by_clause_opt ::= ORDER BY sort_specification_list", - /* 472 */ "slimit_clause_opt ::=", - /* 473 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER", - /* 474 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER", - /* 475 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER", - /* 476 */ "limit_clause_opt ::=", - /* 477 */ "limit_clause_opt ::= LIMIT NK_INTEGER", - /* 478 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER", - /* 479 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER", - /* 480 */ "subquery ::= NK_LP query_expression NK_RP", - /* 481 */ "search_condition ::= common_expression", - /* 482 */ "sort_specification_list ::= sort_specification", - /* 483 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification", - /* 484 */ "sort_specification ::= expression ordering_specification_opt null_ordering_opt", - /* 485 */ "ordering_specification_opt ::=", - /* 486 */ "ordering_specification_opt ::= ASC", - /* 487 */ "ordering_specification_opt ::= DESC", - /* 488 */ "null_ordering_opt ::=", - /* 489 */ "null_ordering_opt ::= NULLS FIRST", - /* 490 */ "null_ordering_opt ::= NULLS LAST", + /* 268 */ "stream_options ::=", + /* 269 */ "stream_options ::= stream_options TRIGGER AT_ONCE", + /* 270 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE", + /* 271 */ "stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal", + /* 272 */ "stream_options ::= stream_options WATERMARK duration_literal", + /* 273 */ "stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER", + /* 274 */ "cmd ::= KILL CONNECTION NK_INTEGER", + /* 275 */ "cmd ::= KILL QUERY NK_STRING", + /* 276 */ "cmd ::= KILL TRANSACTION NK_INTEGER", + /* 277 */ "cmd ::= BALANCE VGROUP", + /* 278 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER", + /* 279 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list", + /* 280 */ "cmd ::= SPLIT VGROUP NK_INTEGER", + /* 281 */ "dnode_list ::= DNODE NK_INTEGER", + /* 282 */ "dnode_list ::= dnode_list DNODE NK_INTEGER", + /* 283 */ "cmd ::= DELETE FROM full_table_name where_clause_opt", + /* 284 */ "cmd ::= query_expression", + /* 285 */ "cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression", + /* 286 */ "cmd ::= INSERT INTO full_table_name query_expression", + /* 287 */ "literal ::= NK_INTEGER", + /* 288 */ "literal ::= NK_FLOAT", + /* 289 */ "literal ::= NK_STRING", + /* 290 */ "literal ::= NK_BOOL", + /* 291 */ "literal ::= TIMESTAMP NK_STRING", + /* 292 */ "literal ::= duration_literal", + /* 293 */ "literal ::= NULL", + /* 294 */ "literal ::= NK_QUESTION", + /* 295 */ "duration_literal ::= NK_VARIABLE", + /* 296 */ "signed ::= NK_INTEGER", + /* 297 */ "signed ::= NK_PLUS NK_INTEGER", + /* 298 */ "signed ::= NK_MINUS NK_INTEGER", + /* 299 */ "signed ::= NK_FLOAT", + /* 300 */ "signed ::= NK_PLUS NK_FLOAT", + /* 301 */ "signed ::= NK_MINUS NK_FLOAT", + /* 302 */ "signed_literal ::= signed", + /* 303 */ "signed_literal ::= NK_STRING", + /* 304 */ "signed_literal ::= NK_BOOL", + /* 305 */ "signed_literal ::= TIMESTAMP NK_STRING", + /* 306 */ "signed_literal ::= duration_literal", + /* 307 */ "signed_literal ::= NULL", + /* 308 */ "signed_literal ::= literal_func", + /* 309 */ "signed_literal ::= NK_QUESTION", + /* 310 */ "literal_list ::= signed_literal", + /* 311 */ "literal_list ::= literal_list NK_COMMA signed_literal", + /* 312 */ "db_name ::= NK_ID", + /* 313 */ "table_name ::= NK_ID", + /* 314 */ "column_name ::= NK_ID", + /* 315 */ "function_name ::= NK_ID", + /* 316 */ "table_alias ::= NK_ID", + /* 317 */ "column_alias ::= NK_ID", + /* 318 */ "user_name ::= NK_ID", + /* 319 */ "topic_name ::= NK_ID", + /* 320 */ "stream_name ::= NK_ID", + /* 321 */ "cgroup_name ::= NK_ID", + /* 322 */ "expression ::= literal", + /* 323 */ "expression ::= pseudo_column", + /* 324 */ "expression ::= column_reference", + /* 325 */ "expression ::= function_expression", + /* 326 */ "expression ::= subquery", + /* 327 */ "expression ::= NK_LP expression NK_RP", + /* 328 */ "expression ::= NK_PLUS expression", + /* 329 */ "expression ::= NK_MINUS expression", + /* 330 */ "expression ::= expression NK_PLUS expression", + /* 331 */ "expression ::= expression NK_MINUS expression", + /* 332 */ "expression ::= expression NK_STAR expression", + /* 333 */ "expression ::= expression NK_SLASH expression", + /* 334 */ "expression ::= expression NK_REM expression", + /* 335 */ "expression ::= column_reference NK_ARROW NK_STRING", + /* 336 */ "expression ::= expression NK_BITAND expression", + /* 337 */ "expression ::= expression NK_BITOR expression", + /* 338 */ "expression_list ::= expression", + /* 339 */ "expression_list ::= expression_list NK_COMMA expression", + /* 340 */ "column_reference ::= column_name", + /* 341 */ "column_reference ::= table_name NK_DOT column_name", + /* 342 */ "pseudo_column ::= ROWTS", + /* 343 */ "pseudo_column ::= TBNAME", + /* 344 */ "pseudo_column ::= table_name NK_DOT TBNAME", + /* 345 */ "pseudo_column ::= QSTART", + /* 346 */ "pseudo_column ::= QEND", + /* 347 */ "pseudo_column ::= QDURATION", + /* 348 */ "pseudo_column ::= WSTART", + /* 349 */ "pseudo_column ::= WEND", + /* 350 */ "pseudo_column ::= WDURATION", + /* 351 */ "function_expression ::= function_name NK_LP expression_list NK_RP", + /* 352 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP", + /* 353 */ "function_expression ::= CAST NK_LP expression AS type_name NK_RP", + /* 354 */ "function_expression ::= literal_func", + /* 355 */ "literal_func ::= noarg_func NK_LP NK_RP", + /* 356 */ "literal_func ::= NOW", + /* 357 */ "noarg_func ::= NOW", + /* 358 */ "noarg_func ::= TODAY", + /* 359 */ "noarg_func ::= TIMEZONE", + /* 360 */ "noarg_func ::= DATABASE", + /* 361 */ "noarg_func ::= CLIENT_VERSION", + /* 362 */ "noarg_func ::= SERVER_VERSION", + /* 363 */ "noarg_func ::= SERVER_STATUS", + /* 364 */ "noarg_func ::= CURRENT_USER", + /* 365 */ "noarg_func ::= USER", + /* 366 */ "star_func ::= COUNT", + /* 367 */ "star_func ::= FIRST", + /* 368 */ "star_func ::= LAST", + /* 369 */ "star_func ::= LAST_ROW", + /* 370 */ "star_func_para_list ::= NK_STAR", + /* 371 */ "star_func_para_list ::= other_para_list", + /* 372 */ "other_para_list ::= star_func_para", + /* 373 */ "other_para_list ::= other_para_list NK_COMMA star_func_para", + /* 374 */ "star_func_para ::= expression", + /* 375 */ "star_func_para ::= table_name NK_DOT NK_STAR", + /* 376 */ "predicate ::= expression compare_op expression", + /* 377 */ "predicate ::= expression BETWEEN expression AND expression", + /* 378 */ "predicate ::= expression NOT BETWEEN expression AND expression", + /* 379 */ "predicate ::= expression IS NULL", + /* 380 */ "predicate ::= expression IS NOT NULL", + /* 381 */ "predicate ::= expression in_op in_predicate_value", + /* 382 */ "compare_op ::= NK_LT", + /* 383 */ "compare_op ::= NK_GT", + /* 384 */ "compare_op ::= NK_LE", + /* 385 */ "compare_op ::= NK_GE", + /* 386 */ "compare_op ::= NK_NE", + /* 387 */ "compare_op ::= NK_EQ", + /* 388 */ "compare_op ::= LIKE", + /* 389 */ "compare_op ::= NOT LIKE", + /* 390 */ "compare_op ::= MATCH", + /* 391 */ "compare_op ::= NMATCH", + /* 392 */ "compare_op ::= CONTAINS", + /* 393 */ "in_op ::= IN", + /* 394 */ "in_op ::= NOT IN", + /* 395 */ "in_predicate_value ::= NK_LP literal_list NK_RP", + /* 396 */ "boolean_value_expression ::= boolean_primary", + /* 397 */ "boolean_value_expression ::= NOT boolean_primary", + /* 398 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression", + /* 399 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression", + /* 400 */ "boolean_primary ::= predicate", + /* 401 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP", + /* 402 */ "common_expression ::= expression", + /* 403 */ "common_expression ::= boolean_value_expression", + /* 404 */ "from_clause_opt ::=", + /* 405 */ "from_clause_opt ::= FROM table_reference_list", + /* 406 */ "table_reference_list ::= table_reference", + /* 407 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference", + /* 408 */ "table_reference ::= table_primary", + /* 409 */ "table_reference ::= joined_table", + /* 410 */ "table_primary ::= table_name alias_opt", + /* 411 */ "table_primary ::= db_name NK_DOT table_name alias_opt", + /* 412 */ "table_primary ::= subquery alias_opt", + /* 413 */ "table_primary ::= parenthesized_joined_table", + /* 414 */ "alias_opt ::=", + /* 415 */ "alias_opt ::= table_alias", + /* 416 */ "alias_opt ::= AS table_alias", + /* 417 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP", + /* 418 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP", + /* 419 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition", + /* 420 */ "join_type ::=", + /* 421 */ "join_type ::= INNER", + /* 422 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt", + /* 423 */ "set_quantifier_opt ::=", + /* 424 */ "set_quantifier_opt ::= DISTINCT", + /* 425 */ "set_quantifier_opt ::= ALL", + /* 426 */ "select_list ::= select_item", + /* 427 */ "select_list ::= select_list NK_COMMA select_item", + /* 428 */ "select_item ::= NK_STAR", + /* 429 */ "select_item ::= common_expression", + /* 430 */ "select_item ::= common_expression column_alias", + /* 431 */ "select_item ::= common_expression AS column_alias", + /* 432 */ "select_item ::= table_name NK_DOT NK_STAR", + /* 433 */ "where_clause_opt ::=", + /* 434 */ "where_clause_opt ::= WHERE search_condition", + /* 435 */ "partition_by_clause_opt ::=", + /* 436 */ "partition_by_clause_opt ::= PARTITION BY expression_list", + /* 437 */ "twindow_clause_opt ::=", + /* 438 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP", + /* 439 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP", + /* 440 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt", + /* 441 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt", + /* 442 */ "sliding_opt ::=", + /* 443 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP", + /* 444 */ "fill_opt ::=", + /* 445 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP", + /* 446 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP", + /* 447 */ "fill_mode ::= NONE", + /* 448 */ "fill_mode ::= PREV", + /* 449 */ "fill_mode ::= NULL", + /* 450 */ "fill_mode ::= LINEAR", + /* 451 */ "fill_mode ::= NEXT", + /* 452 */ "group_by_clause_opt ::=", + /* 453 */ "group_by_clause_opt ::= GROUP BY group_by_list", + /* 454 */ "group_by_list ::= expression", + /* 455 */ "group_by_list ::= group_by_list NK_COMMA expression", + /* 456 */ "having_clause_opt ::=", + /* 457 */ "having_clause_opt ::= HAVING search_condition", + /* 458 */ "range_opt ::=", + /* 459 */ "range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP", + /* 460 */ "every_opt ::=", + /* 461 */ "every_opt ::= EVERY NK_LP duration_literal NK_RP", + /* 462 */ "query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt", + /* 463 */ "query_expression_body ::= query_primary", + /* 464 */ "query_expression_body ::= query_expression_body UNION ALL query_expression_body", + /* 465 */ "query_expression_body ::= query_expression_body UNION query_expression_body", + /* 466 */ "query_primary ::= query_specification", + /* 467 */ "query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP", + /* 468 */ "order_by_clause_opt ::=", + /* 469 */ "order_by_clause_opt ::= ORDER BY sort_specification_list", + /* 470 */ "slimit_clause_opt ::=", + /* 471 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER", + /* 472 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER", + /* 473 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER", + /* 474 */ "limit_clause_opt ::=", + /* 475 */ "limit_clause_opt ::= LIMIT NK_INTEGER", + /* 476 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER", + /* 477 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER", + /* 478 */ "subquery ::= NK_LP query_expression NK_RP", + /* 479 */ "search_condition ::= common_expression", + /* 480 */ "sort_specification_list ::= sort_specification", + /* 481 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification", + /* 482 */ "sort_specification ::= expression ordering_specification_opt null_ordering_opt", + /* 483 */ "ordering_specification_opt ::=", + /* 484 */ "ordering_specification_opt ::= ASC", + /* 485 */ "ordering_specification_opt ::= DESC", + /* 486 */ "null_ordering_opt ::=", + /* 487 */ "null_ordering_opt ::= NULLS FIRST", + /* 488 */ "null_ordering_opt ::= NULLS LAST", }; #endif /* NDEBUG */ @@ -2393,42 +2346,41 @@ static void yy_destructor( case 362: /* stream_options */ case 364: /* query_expression */ case 367: /* explain_options */ - case 371: /* into_opt */ - case 373: /* where_clause_opt */ - case 374: /* signed */ - case 375: /* literal_func */ - case 379: /* expression */ - case 380: /* pseudo_column */ - case 381: /* column_reference */ - case 382: /* function_expression */ - case 383: /* subquery */ - case 388: /* star_func_para */ - case 389: /* predicate */ - case 392: /* in_predicate_value */ - case 393: /* boolean_value_expression */ - case 394: /* boolean_primary */ - case 395: /* common_expression */ - case 396: /* from_clause_opt */ - case 397: /* table_reference_list */ - case 398: /* table_reference */ - case 399: /* table_primary */ - case 400: /* joined_table */ - case 402: /* parenthesized_joined_table */ - case 404: /* search_condition */ - case 405: /* query_specification */ - case 409: /* range_opt */ - case 410: /* every_opt */ - case 411: /* fill_opt */ - case 412: /* twindow_clause_opt */ - case 414: /* having_clause_opt */ - case 415: /* select_item */ - case 418: /* query_expression_body */ - case 420: /* slimit_clause_opt */ - case 421: /* limit_clause_opt */ - case 422: /* query_primary */ - case 424: /* sort_specification */ + case 372: /* where_clause_opt */ + case 373: /* signed */ + case 374: /* literal_func */ + case 378: /* expression */ + case 379: /* pseudo_column */ + case 380: /* column_reference */ + case 381: /* function_expression */ + case 382: /* subquery */ + case 387: /* star_func_para */ + case 388: /* predicate */ + case 391: /* in_predicate_value */ + case 392: /* boolean_value_expression */ + case 393: /* boolean_primary */ + case 394: /* common_expression */ + case 395: /* from_clause_opt */ + case 396: /* table_reference_list */ + case 397: /* table_reference */ + case 398: /* table_primary */ + case 399: /* joined_table */ + case 401: /* parenthesized_joined_table */ + case 403: /* search_condition */ + case 404: /* query_specification */ + case 408: /* range_opt */ + case 409: /* every_opt */ + case 410: /* fill_opt */ + case 411: /* twindow_clause_opt */ + case 413: /* having_clause_opt */ + case 414: /* select_item */ + case 417: /* query_expression_body */ + case 419: /* slimit_clause_opt */ + case 420: /* limit_clause_opt */ + case 421: /* query_primary */ + case 423: /* sort_specification */ { - nodesDestroyNode((yypminor->yy840)); + nodesDestroyNode((yypminor->yy272)); } break; case 306: /* account_options */ @@ -2449,11 +2401,11 @@ static void yy_destructor( case 363: /* topic_name */ case 365: /* cgroup_name */ case 370: /* stream_name */ - case 377: /* table_alias */ - case 378: /* column_alias */ - case 384: /* star_func */ - case 386: /* noarg_func */ - case 401: /* alias_opt */ + case 376: /* table_alias */ + case 377: /* column_alias */ + case 383: /* star_func */ + case 385: /* noarg_func */ + case 400: /* alias_opt */ { } @@ -2474,7 +2426,7 @@ static void yy_destructor( case 320: /* exists_opt */ case 366: /* analyze_opt */ case 368: /* agg_func_opt */ - case 406: /* set_quantifier_opt */ + case 405: /* set_quantifier_opt */ { } @@ -2493,18 +2445,18 @@ static void yy_destructor( case 346: /* duration_list */ case 347: /* rollup_func_list */ case 358: /* func_list */ - case 372: /* dnode_list */ - case 376: /* literal_list */ - case 385: /* star_func_para_list */ - case 387: /* other_para_list */ - case 407: /* select_list */ - case 408: /* partition_by_clause_opt */ - case 413: /* group_by_clause_opt */ - case 417: /* group_by_list */ - case 419: /* order_by_clause_opt */ - case 423: /* sort_specification_list */ + case 371: /* dnode_list */ + case 375: /* literal_list */ + case 384: /* star_func_para_list */ + case 386: /* other_para_list */ + case 406: /* select_list */ + case 407: /* partition_by_clause_opt */ + case 412: /* group_by_clause_opt */ + case 416: /* group_by_list */ + case 418: /* order_by_clause_opt */ + case 422: /* sort_specification_list */ { - nodesDestroyList((yypminor->yy544)); + nodesDestroyList((yypminor->yy172)); } break; case 325: /* alter_db_option */ @@ -2518,28 +2470,28 @@ static void yy_destructor( } break; - case 390: /* compare_op */ - case 391: /* in_op */ + case 389: /* compare_op */ + case 390: /* in_op */ { } break; - case 403: /* join_type */ + case 402: /* join_type */ { } break; - case 416: /* fill_mode */ + case 415: /* fill_mode */ { } break; - case 425: /* ordering_specification_opt */ + case 424: /* ordering_specification_opt */ { } break; - case 426: /* null_ordering_opt */ + case 425: /* null_ordering_opt */ { } @@ -3104,231 +3056,229 @@ static const struct { { 368, -1 }, /* (263) agg_func_opt ::= AGGREGATE */ { 369, 0 }, /* (264) bufsize_opt ::= */ { 369, -2 }, /* (265) bufsize_opt ::= BUFSIZE NK_INTEGER */ - { 305, -8 }, /* (266) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */ + { 305, -9 }, /* (266) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name AS query_expression */ { 305, -4 }, /* (267) cmd ::= DROP STREAM exists_opt stream_name */ - { 371, 0 }, /* (268) into_opt ::= */ - { 371, -2 }, /* (269) into_opt ::= INTO full_table_name */ - { 362, 0 }, /* (270) stream_options ::= */ - { 362, -3 }, /* (271) stream_options ::= stream_options TRIGGER AT_ONCE */ - { 362, -3 }, /* (272) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ - { 362, -4 }, /* (273) stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */ - { 362, -3 }, /* (274) stream_options ::= stream_options WATERMARK duration_literal */ - { 362, -4 }, /* (275) stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */ - { 305, -3 }, /* (276) cmd ::= KILL CONNECTION NK_INTEGER */ - { 305, -3 }, /* (277) cmd ::= KILL QUERY NK_STRING */ - { 305, -3 }, /* (278) cmd ::= KILL TRANSACTION NK_INTEGER */ - { 305, -2 }, /* (279) cmd ::= BALANCE VGROUP */ - { 305, -4 }, /* (280) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ - { 305, -4 }, /* (281) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ - { 305, -3 }, /* (282) cmd ::= SPLIT VGROUP NK_INTEGER */ - { 372, -2 }, /* (283) dnode_list ::= DNODE NK_INTEGER */ - { 372, -3 }, /* (284) dnode_list ::= dnode_list DNODE NK_INTEGER */ - { 305, -4 }, /* (285) cmd ::= DELETE FROM full_table_name where_clause_opt */ - { 305, -1 }, /* (286) cmd ::= query_expression */ - { 305, -7 }, /* (287) cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression */ - { 305, -4 }, /* (288) cmd ::= INSERT INTO full_table_name query_expression */ - { 308, -1 }, /* (289) literal ::= NK_INTEGER */ - { 308, -1 }, /* (290) literal ::= NK_FLOAT */ - { 308, -1 }, /* (291) literal ::= NK_STRING */ - { 308, -1 }, /* (292) literal ::= NK_BOOL */ - { 308, -2 }, /* (293) literal ::= TIMESTAMP NK_STRING */ - { 308, -1 }, /* (294) literal ::= duration_literal */ - { 308, -1 }, /* (295) literal ::= NULL */ - { 308, -1 }, /* (296) literal ::= NK_QUESTION */ - { 349, -1 }, /* (297) duration_literal ::= NK_VARIABLE */ - { 374, -1 }, /* (298) signed ::= NK_INTEGER */ - { 374, -2 }, /* (299) signed ::= NK_PLUS NK_INTEGER */ - { 374, -2 }, /* (300) signed ::= NK_MINUS NK_INTEGER */ - { 374, -1 }, /* (301) signed ::= NK_FLOAT */ - { 374, -2 }, /* (302) signed ::= NK_PLUS NK_FLOAT */ - { 374, -2 }, /* (303) signed ::= NK_MINUS NK_FLOAT */ - { 338, -1 }, /* (304) signed_literal ::= signed */ - { 338, -1 }, /* (305) signed_literal ::= NK_STRING */ - { 338, -1 }, /* (306) signed_literal ::= NK_BOOL */ - { 338, -2 }, /* (307) signed_literal ::= TIMESTAMP NK_STRING */ - { 338, -1 }, /* (308) signed_literal ::= duration_literal */ - { 338, -1 }, /* (309) signed_literal ::= NULL */ - { 338, -1 }, /* (310) signed_literal ::= literal_func */ - { 338, -1 }, /* (311) signed_literal ::= NK_QUESTION */ - { 376, -1 }, /* (312) literal_list ::= signed_literal */ - { 376, -3 }, /* (313) literal_list ::= literal_list NK_COMMA signed_literal */ - { 316, -1 }, /* (314) db_name ::= NK_ID */ - { 344, -1 }, /* (315) table_name ::= NK_ID */ - { 336, -1 }, /* (316) column_name ::= NK_ID */ - { 351, -1 }, /* (317) function_name ::= NK_ID */ - { 377, -1 }, /* (318) table_alias ::= NK_ID */ - { 378, -1 }, /* (319) column_alias ::= NK_ID */ - { 310, -1 }, /* (320) user_name ::= NK_ID */ - { 363, -1 }, /* (321) topic_name ::= NK_ID */ - { 370, -1 }, /* (322) stream_name ::= NK_ID */ - { 365, -1 }, /* (323) cgroup_name ::= NK_ID */ - { 379, -1 }, /* (324) expression ::= literal */ - { 379, -1 }, /* (325) expression ::= pseudo_column */ - { 379, -1 }, /* (326) expression ::= column_reference */ - { 379, -1 }, /* (327) expression ::= function_expression */ - { 379, -1 }, /* (328) expression ::= subquery */ - { 379, -3 }, /* (329) expression ::= NK_LP expression NK_RP */ - { 379, -2 }, /* (330) expression ::= NK_PLUS expression */ - { 379, -2 }, /* (331) expression ::= NK_MINUS expression */ - { 379, -3 }, /* (332) expression ::= expression NK_PLUS expression */ - { 379, -3 }, /* (333) expression ::= expression NK_MINUS expression */ - { 379, -3 }, /* (334) expression ::= expression NK_STAR expression */ - { 379, -3 }, /* (335) expression ::= expression NK_SLASH expression */ - { 379, -3 }, /* (336) expression ::= expression NK_REM expression */ - { 379, -3 }, /* (337) expression ::= column_reference NK_ARROW NK_STRING */ - { 379, -3 }, /* (338) expression ::= expression NK_BITAND expression */ - { 379, -3 }, /* (339) expression ::= expression NK_BITOR expression */ - { 341, -1 }, /* (340) expression_list ::= expression */ - { 341, -3 }, /* (341) expression_list ::= expression_list NK_COMMA expression */ - { 381, -1 }, /* (342) column_reference ::= column_name */ - { 381, -3 }, /* (343) column_reference ::= table_name NK_DOT column_name */ - { 380, -1 }, /* (344) pseudo_column ::= ROWTS */ - { 380, -1 }, /* (345) pseudo_column ::= TBNAME */ - { 380, -3 }, /* (346) pseudo_column ::= table_name NK_DOT TBNAME */ - { 380, -1 }, /* (347) pseudo_column ::= QSTART */ - { 380, -1 }, /* (348) pseudo_column ::= QEND */ - { 380, -1 }, /* (349) pseudo_column ::= QDURATION */ - { 380, -1 }, /* (350) pseudo_column ::= WSTART */ - { 380, -1 }, /* (351) pseudo_column ::= WEND */ - { 380, -1 }, /* (352) pseudo_column ::= WDURATION */ - { 382, -4 }, /* (353) function_expression ::= function_name NK_LP expression_list NK_RP */ - { 382, -4 }, /* (354) function_expression ::= star_func NK_LP star_func_para_list NK_RP */ - { 382, -6 }, /* (355) function_expression ::= CAST NK_LP expression AS type_name NK_RP */ - { 382, -1 }, /* (356) function_expression ::= literal_func */ - { 375, -3 }, /* (357) literal_func ::= noarg_func NK_LP NK_RP */ - { 375, -1 }, /* (358) literal_func ::= NOW */ - { 386, -1 }, /* (359) noarg_func ::= NOW */ - { 386, -1 }, /* (360) noarg_func ::= TODAY */ - { 386, -1 }, /* (361) noarg_func ::= TIMEZONE */ - { 386, -1 }, /* (362) noarg_func ::= DATABASE */ - { 386, -1 }, /* (363) noarg_func ::= CLIENT_VERSION */ - { 386, -1 }, /* (364) noarg_func ::= SERVER_VERSION */ - { 386, -1 }, /* (365) noarg_func ::= SERVER_STATUS */ - { 386, -1 }, /* (366) noarg_func ::= CURRENT_USER */ - { 386, -1 }, /* (367) noarg_func ::= USER */ - { 384, -1 }, /* (368) star_func ::= COUNT */ - { 384, -1 }, /* (369) star_func ::= FIRST */ - { 384, -1 }, /* (370) star_func ::= LAST */ - { 384, -1 }, /* (371) star_func ::= LAST_ROW */ - { 385, -1 }, /* (372) star_func_para_list ::= NK_STAR */ - { 385, -1 }, /* (373) star_func_para_list ::= other_para_list */ - { 387, -1 }, /* (374) other_para_list ::= star_func_para */ - { 387, -3 }, /* (375) other_para_list ::= other_para_list NK_COMMA star_func_para */ - { 388, -1 }, /* (376) star_func_para ::= expression */ - { 388, -3 }, /* (377) star_func_para ::= table_name NK_DOT NK_STAR */ - { 389, -3 }, /* (378) predicate ::= expression compare_op expression */ - { 389, -5 }, /* (379) predicate ::= expression BETWEEN expression AND expression */ - { 389, -6 }, /* (380) predicate ::= expression NOT BETWEEN expression AND expression */ - { 389, -3 }, /* (381) predicate ::= expression IS NULL */ - { 389, -4 }, /* (382) predicate ::= expression IS NOT NULL */ - { 389, -3 }, /* (383) predicate ::= expression in_op in_predicate_value */ - { 390, -1 }, /* (384) compare_op ::= NK_LT */ - { 390, -1 }, /* (385) compare_op ::= NK_GT */ - { 390, -1 }, /* (386) compare_op ::= NK_LE */ - { 390, -1 }, /* (387) compare_op ::= NK_GE */ - { 390, -1 }, /* (388) compare_op ::= NK_NE */ - { 390, -1 }, /* (389) compare_op ::= NK_EQ */ - { 390, -1 }, /* (390) compare_op ::= LIKE */ - { 390, -2 }, /* (391) compare_op ::= NOT LIKE */ - { 390, -1 }, /* (392) compare_op ::= MATCH */ - { 390, -1 }, /* (393) compare_op ::= NMATCH */ - { 390, -1 }, /* (394) compare_op ::= CONTAINS */ - { 391, -1 }, /* (395) in_op ::= IN */ - { 391, -2 }, /* (396) in_op ::= NOT IN */ - { 392, -3 }, /* (397) in_predicate_value ::= NK_LP literal_list NK_RP */ - { 393, -1 }, /* (398) boolean_value_expression ::= boolean_primary */ - { 393, -2 }, /* (399) boolean_value_expression ::= NOT boolean_primary */ - { 393, -3 }, /* (400) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ - { 393, -3 }, /* (401) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ - { 394, -1 }, /* (402) boolean_primary ::= predicate */ - { 394, -3 }, /* (403) boolean_primary ::= NK_LP boolean_value_expression NK_RP */ - { 395, -1 }, /* (404) common_expression ::= expression */ - { 395, -1 }, /* (405) common_expression ::= boolean_value_expression */ - { 396, 0 }, /* (406) from_clause_opt ::= */ - { 396, -2 }, /* (407) from_clause_opt ::= FROM table_reference_list */ - { 397, -1 }, /* (408) table_reference_list ::= table_reference */ - { 397, -3 }, /* (409) table_reference_list ::= table_reference_list NK_COMMA table_reference */ - { 398, -1 }, /* (410) table_reference ::= table_primary */ - { 398, -1 }, /* (411) table_reference ::= joined_table */ - { 399, -2 }, /* (412) table_primary ::= table_name alias_opt */ - { 399, -4 }, /* (413) table_primary ::= db_name NK_DOT table_name alias_opt */ - { 399, -2 }, /* (414) table_primary ::= subquery alias_opt */ - { 399, -1 }, /* (415) table_primary ::= parenthesized_joined_table */ - { 401, 0 }, /* (416) alias_opt ::= */ - { 401, -1 }, /* (417) alias_opt ::= table_alias */ - { 401, -2 }, /* (418) alias_opt ::= AS table_alias */ - { 402, -3 }, /* (419) parenthesized_joined_table ::= NK_LP joined_table NK_RP */ - { 402, -3 }, /* (420) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ - { 400, -6 }, /* (421) joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ - { 403, 0 }, /* (422) join_type ::= */ - { 403, -1 }, /* (423) join_type ::= INNER */ - { 405, -12 }, /* (424) query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ - { 406, 0 }, /* (425) set_quantifier_opt ::= */ - { 406, -1 }, /* (426) set_quantifier_opt ::= DISTINCT */ - { 406, -1 }, /* (427) set_quantifier_opt ::= ALL */ - { 407, -1 }, /* (428) select_list ::= select_item */ - { 407, -3 }, /* (429) select_list ::= select_list NK_COMMA select_item */ - { 415, -1 }, /* (430) select_item ::= NK_STAR */ - { 415, -1 }, /* (431) select_item ::= common_expression */ - { 415, -2 }, /* (432) select_item ::= common_expression column_alias */ - { 415, -3 }, /* (433) select_item ::= common_expression AS column_alias */ - { 415, -3 }, /* (434) select_item ::= table_name NK_DOT NK_STAR */ - { 373, 0 }, /* (435) where_clause_opt ::= */ - { 373, -2 }, /* (436) where_clause_opt ::= WHERE search_condition */ - { 408, 0 }, /* (437) partition_by_clause_opt ::= */ - { 408, -3 }, /* (438) partition_by_clause_opt ::= PARTITION BY expression_list */ - { 412, 0 }, /* (439) twindow_clause_opt ::= */ - { 412, -6 }, /* (440) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ - { 412, -4 }, /* (441) twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */ - { 412, -6 }, /* (442) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ - { 412, -8 }, /* (443) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ - { 359, 0 }, /* (444) sliding_opt ::= */ - { 359, -4 }, /* (445) sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ - { 411, 0 }, /* (446) fill_opt ::= */ - { 411, -4 }, /* (447) fill_opt ::= FILL NK_LP fill_mode NK_RP */ - { 411, -6 }, /* (448) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ - { 416, -1 }, /* (449) fill_mode ::= NONE */ - { 416, -1 }, /* (450) fill_mode ::= PREV */ - { 416, -1 }, /* (451) fill_mode ::= NULL */ - { 416, -1 }, /* (452) fill_mode ::= LINEAR */ - { 416, -1 }, /* (453) fill_mode ::= NEXT */ - { 413, 0 }, /* (454) group_by_clause_opt ::= */ - { 413, -3 }, /* (455) group_by_clause_opt ::= GROUP BY group_by_list */ - { 417, -1 }, /* (456) group_by_list ::= expression */ - { 417, -3 }, /* (457) group_by_list ::= group_by_list NK_COMMA expression */ - { 414, 0 }, /* (458) having_clause_opt ::= */ - { 414, -2 }, /* (459) having_clause_opt ::= HAVING search_condition */ - { 409, 0 }, /* (460) range_opt ::= */ - { 409, -6 }, /* (461) range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP */ - { 410, 0 }, /* (462) every_opt ::= */ - { 410, -4 }, /* (463) every_opt ::= EVERY NK_LP duration_literal NK_RP */ - { 364, -4 }, /* (464) query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */ - { 418, -1 }, /* (465) query_expression_body ::= query_primary */ - { 418, -4 }, /* (466) query_expression_body ::= query_expression_body UNION ALL query_expression_body */ - { 418, -3 }, /* (467) query_expression_body ::= query_expression_body UNION query_expression_body */ - { 422, -1 }, /* (468) query_primary ::= query_specification */ - { 422, -6 }, /* (469) query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */ - { 419, 0 }, /* (470) order_by_clause_opt ::= */ - { 419, -3 }, /* (471) order_by_clause_opt ::= ORDER BY sort_specification_list */ - { 420, 0 }, /* (472) slimit_clause_opt ::= */ - { 420, -2 }, /* (473) slimit_clause_opt ::= SLIMIT NK_INTEGER */ - { 420, -4 }, /* (474) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ - { 420, -4 }, /* (475) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - { 421, 0 }, /* (476) limit_clause_opt ::= */ - { 421, -2 }, /* (477) limit_clause_opt ::= LIMIT NK_INTEGER */ - { 421, -4 }, /* (478) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ - { 421, -4 }, /* (479) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - { 383, -3 }, /* (480) subquery ::= NK_LP query_expression NK_RP */ - { 404, -1 }, /* (481) search_condition ::= common_expression */ - { 423, -1 }, /* (482) sort_specification_list ::= sort_specification */ - { 423, -3 }, /* (483) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ - { 424, -3 }, /* (484) sort_specification ::= expression ordering_specification_opt null_ordering_opt */ - { 425, 0 }, /* (485) ordering_specification_opt ::= */ - { 425, -1 }, /* (486) ordering_specification_opt ::= ASC */ - { 425, -1 }, /* (487) ordering_specification_opt ::= DESC */ - { 426, 0 }, /* (488) null_ordering_opt ::= */ - { 426, -2 }, /* (489) null_ordering_opt ::= NULLS FIRST */ - { 426, -2 }, /* (490) null_ordering_opt ::= NULLS LAST */ + { 362, 0 }, /* (268) stream_options ::= */ + { 362, -3 }, /* (269) stream_options ::= stream_options TRIGGER AT_ONCE */ + { 362, -3 }, /* (270) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ + { 362, -4 }, /* (271) stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */ + { 362, -3 }, /* (272) stream_options ::= stream_options WATERMARK duration_literal */ + { 362, -4 }, /* (273) stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */ + { 305, -3 }, /* (274) cmd ::= KILL CONNECTION NK_INTEGER */ + { 305, -3 }, /* (275) cmd ::= KILL QUERY NK_STRING */ + { 305, -3 }, /* (276) cmd ::= KILL TRANSACTION NK_INTEGER */ + { 305, -2 }, /* (277) cmd ::= BALANCE VGROUP */ + { 305, -4 }, /* (278) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ + { 305, -4 }, /* (279) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ + { 305, -3 }, /* (280) cmd ::= SPLIT VGROUP NK_INTEGER */ + { 371, -2 }, /* (281) dnode_list ::= DNODE NK_INTEGER */ + { 371, -3 }, /* (282) dnode_list ::= dnode_list DNODE NK_INTEGER */ + { 305, -4 }, /* (283) cmd ::= DELETE FROM full_table_name where_clause_opt */ + { 305, -1 }, /* (284) cmd ::= query_expression */ + { 305, -7 }, /* (285) cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression */ + { 305, -4 }, /* (286) cmd ::= INSERT INTO full_table_name query_expression */ + { 308, -1 }, /* (287) literal ::= NK_INTEGER */ + { 308, -1 }, /* (288) literal ::= NK_FLOAT */ + { 308, -1 }, /* (289) literal ::= NK_STRING */ + { 308, -1 }, /* (290) literal ::= NK_BOOL */ + { 308, -2 }, /* (291) literal ::= TIMESTAMP NK_STRING */ + { 308, -1 }, /* (292) literal ::= duration_literal */ + { 308, -1 }, /* (293) literal ::= NULL */ + { 308, -1 }, /* (294) literal ::= NK_QUESTION */ + { 349, -1 }, /* (295) duration_literal ::= NK_VARIABLE */ + { 373, -1 }, /* (296) signed ::= NK_INTEGER */ + { 373, -2 }, /* (297) signed ::= NK_PLUS NK_INTEGER */ + { 373, -2 }, /* (298) signed ::= NK_MINUS NK_INTEGER */ + { 373, -1 }, /* (299) signed ::= NK_FLOAT */ + { 373, -2 }, /* (300) signed ::= NK_PLUS NK_FLOAT */ + { 373, -2 }, /* (301) signed ::= NK_MINUS NK_FLOAT */ + { 338, -1 }, /* (302) signed_literal ::= signed */ + { 338, -1 }, /* (303) signed_literal ::= NK_STRING */ + { 338, -1 }, /* (304) signed_literal ::= NK_BOOL */ + { 338, -2 }, /* (305) signed_literal ::= TIMESTAMP NK_STRING */ + { 338, -1 }, /* (306) signed_literal ::= duration_literal */ + { 338, -1 }, /* (307) signed_literal ::= NULL */ + { 338, -1 }, /* (308) signed_literal ::= literal_func */ + { 338, -1 }, /* (309) signed_literal ::= NK_QUESTION */ + { 375, -1 }, /* (310) literal_list ::= signed_literal */ + { 375, -3 }, /* (311) literal_list ::= literal_list NK_COMMA signed_literal */ + { 316, -1 }, /* (312) db_name ::= NK_ID */ + { 344, -1 }, /* (313) table_name ::= NK_ID */ + { 336, -1 }, /* (314) column_name ::= NK_ID */ + { 351, -1 }, /* (315) function_name ::= NK_ID */ + { 376, -1 }, /* (316) table_alias ::= NK_ID */ + { 377, -1 }, /* (317) column_alias ::= NK_ID */ + { 310, -1 }, /* (318) user_name ::= NK_ID */ + { 363, -1 }, /* (319) topic_name ::= NK_ID */ + { 370, -1 }, /* (320) stream_name ::= NK_ID */ + { 365, -1 }, /* (321) cgroup_name ::= NK_ID */ + { 378, -1 }, /* (322) expression ::= literal */ + { 378, -1 }, /* (323) expression ::= pseudo_column */ + { 378, -1 }, /* (324) expression ::= column_reference */ + { 378, -1 }, /* (325) expression ::= function_expression */ + { 378, -1 }, /* (326) expression ::= subquery */ + { 378, -3 }, /* (327) expression ::= NK_LP expression NK_RP */ + { 378, -2 }, /* (328) expression ::= NK_PLUS expression */ + { 378, -2 }, /* (329) expression ::= NK_MINUS expression */ + { 378, -3 }, /* (330) expression ::= expression NK_PLUS expression */ + { 378, -3 }, /* (331) expression ::= expression NK_MINUS expression */ + { 378, -3 }, /* (332) expression ::= expression NK_STAR expression */ + { 378, -3 }, /* (333) expression ::= expression NK_SLASH expression */ + { 378, -3 }, /* (334) expression ::= expression NK_REM expression */ + { 378, -3 }, /* (335) expression ::= column_reference NK_ARROW NK_STRING */ + { 378, -3 }, /* (336) expression ::= expression NK_BITAND expression */ + { 378, -3 }, /* (337) expression ::= expression NK_BITOR expression */ + { 341, -1 }, /* (338) expression_list ::= expression */ + { 341, -3 }, /* (339) expression_list ::= expression_list NK_COMMA expression */ + { 380, -1 }, /* (340) column_reference ::= column_name */ + { 380, -3 }, /* (341) column_reference ::= table_name NK_DOT column_name */ + { 379, -1 }, /* (342) pseudo_column ::= ROWTS */ + { 379, -1 }, /* (343) pseudo_column ::= TBNAME */ + { 379, -3 }, /* (344) pseudo_column ::= table_name NK_DOT TBNAME */ + { 379, -1 }, /* (345) pseudo_column ::= QSTART */ + { 379, -1 }, /* (346) pseudo_column ::= QEND */ + { 379, -1 }, /* (347) pseudo_column ::= QDURATION */ + { 379, -1 }, /* (348) pseudo_column ::= WSTART */ + { 379, -1 }, /* (349) pseudo_column ::= WEND */ + { 379, -1 }, /* (350) pseudo_column ::= WDURATION */ + { 381, -4 }, /* (351) function_expression ::= function_name NK_LP expression_list NK_RP */ + { 381, -4 }, /* (352) function_expression ::= star_func NK_LP star_func_para_list NK_RP */ + { 381, -6 }, /* (353) function_expression ::= CAST NK_LP expression AS type_name NK_RP */ + { 381, -1 }, /* (354) function_expression ::= literal_func */ + { 374, -3 }, /* (355) literal_func ::= noarg_func NK_LP NK_RP */ + { 374, -1 }, /* (356) literal_func ::= NOW */ + { 385, -1 }, /* (357) noarg_func ::= NOW */ + { 385, -1 }, /* (358) noarg_func ::= TODAY */ + { 385, -1 }, /* (359) noarg_func ::= TIMEZONE */ + { 385, -1 }, /* (360) noarg_func ::= DATABASE */ + { 385, -1 }, /* (361) noarg_func ::= CLIENT_VERSION */ + { 385, -1 }, /* (362) noarg_func ::= SERVER_VERSION */ + { 385, -1 }, /* (363) noarg_func ::= SERVER_STATUS */ + { 385, -1 }, /* (364) noarg_func ::= CURRENT_USER */ + { 385, -1 }, /* (365) noarg_func ::= USER */ + { 383, -1 }, /* (366) star_func ::= COUNT */ + { 383, -1 }, /* (367) star_func ::= FIRST */ + { 383, -1 }, /* (368) star_func ::= LAST */ + { 383, -1 }, /* (369) star_func ::= LAST_ROW */ + { 384, -1 }, /* (370) star_func_para_list ::= NK_STAR */ + { 384, -1 }, /* (371) star_func_para_list ::= other_para_list */ + { 386, -1 }, /* (372) other_para_list ::= star_func_para */ + { 386, -3 }, /* (373) other_para_list ::= other_para_list NK_COMMA star_func_para */ + { 387, -1 }, /* (374) star_func_para ::= expression */ + { 387, -3 }, /* (375) star_func_para ::= table_name NK_DOT NK_STAR */ + { 388, -3 }, /* (376) predicate ::= expression compare_op expression */ + { 388, -5 }, /* (377) predicate ::= expression BETWEEN expression AND expression */ + { 388, -6 }, /* (378) predicate ::= expression NOT BETWEEN expression AND expression */ + { 388, -3 }, /* (379) predicate ::= expression IS NULL */ + { 388, -4 }, /* (380) predicate ::= expression IS NOT NULL */ + { 388, -3 }, /* (381) predicate ::= expression in_op in_predicate_value */ + { 389, -1 }, /* (382) compare_op ::= NK_LT */ + { 389, -1 }, /* (383) compare_op ::= NK_GT */ + { 389, -1 }, /* (384) compare_op ::= NK_LE */ + { 389, -1 }, /* (385) compare_op ::= NK_GE */ + { 389, -1 }, /* (386) compare_op ::= NK_NE */ + { 389, -1 }, /* (387) compare_op ::= NK_EQ */ + { 389, -1 }, /* (388) compare_op ::= LIKE */ + { 389, -2 }, /* (389) compare_op ::= NOT LIKE */ + { 389, -1 }, /* (390) compare_op ::= MATCH */ + { 389, -1 }, /* (391) compare_op ::= NMATCH */ + { 389, -1 }, /* (392) compare_op ::= CONTAINS */ + { 390, -1 }, /* (393) in_op ::= IN */ + { 390, -2 }, /* (394) in_op ::= NOT IN */ + { 391, -3 }, /* (395) in_predicate_value ::= NK_LP literal_list NK_RP */ + { 392, -1 }, /* (396) boolean_value_expression ::= boolean_primary */ + { 392, -2 }, /* (397) boolean_value_expression ::= NOT boolean_primary */ + { 392, -3 }, /* (398) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ + { 392, -3 }, /* (399) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ + { 393, -1 }, /* (400) boolean_primary ::= predicate */ + { 393, -3 }, /* (401) boolean_primary ::= NK_LP boolean_value_expression NK_RP */ + { 394, -1 }, /* (402) common_expression ::= expression */ + { 394, -1 }, /* (403) common_expression ::= boolean_value_expression */ + { 395, 0 }, /* (404) from_clause_opt ::= */ + { 395, -2 }, /* (405) from_clause_opt ::= FROM table_reference_list */ + { 396, -1 }, /* (406) table_reference_list ::= table_reference */ + { 396, -3 }, /* (407) table_reference_list ::= table_reference_list NK_COMMA table_reference */ + { 397, -1 }, /* (408) table_reference ::= table_primary */ + { 397, -1 }, /* (409) table_reference ::= joined_table */ + { 398, -2 }, /* (410) table_primary ::= table_name alias_opt */ + { 398, -4 }, /* (411) table_primary ::= db_name NK_DOT table_name alias_opt */ + { 398, -2 }, /* (412) table_primary ::= subquery alias_opt */ + { 398, -1 }, /* (413) table_primary ::= parenthesized_joined_table */ + { 400, 0 }, /* (414) alias_opt ::= */ + { 400, -1 }, /* (415) alias_opt ::= table_alias */ + { 400, -2 }, /* (416) alias_opt ::= AS table_alias */ + { 401, -3 }, /* (417) parenthesized_joined_table ::= NK_LP joined_table NK_RP */ + { 401, -3 }, /* (418) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ + { 399, -6 }, /* (419) joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ + { 402, 0 }, /* (420) join_type ::= */ + { 402, -1 }, /* (421) join_type ::= INNER */ + { 404, -12 }, /* (422) query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ + { 405, 0 }, /* (423) set_quantifier_opt ::= */ + { 405, -1 }, /* (424) set_quantifier_opt ::= DISTINCT */ + { 405, -1 }, /* (425) set_quantifier_opt ::= ALL */ + { 406, -1 }, /* (426) select_list ::= select_item */ + { 406, -3 }, /* (427) select_list ::= select_list NK_COMMA select_item */ + { 414, -1 }, /* (428) select_item ::= NK_STAR */ + { 414, -1 }, /* (429) select_item ::= common_expression */ + { 414, -2 }, /* (430) select_item ::= common_expression column_alias */ + { 414, -3 }, /* (431) select_item ::= common_expression AS column_alias */ + { 414, -3 }, /* (432) select_item ::= table_name NK_DOT NK_STAR */ + { 372, 0 }, /* (433) where_clause_opt ::= */ + { 372, -2 }, /* (434) where_clause_opt ::= WHERE search_condition */ + { 407, 0 }, /* (435) partition_by_clause_opt ::= */ + { 407, -3 }, /* (436) partition_by_clause_opt ::= PARTITION BY expression_list */ + { 411, 0 }, /* (437) twindow_clause_opt ::= */ + { 411, -6 }, /* (438) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ + { 411, -4 }, /* (439) twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */ + { 411, -6 }, /* (440) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ + { 411, -8 }, /* (441) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ + { 359, 0 }, /* (442) sliding_opt ::= */ + { 359, -4 }, /* (443) sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ + { 410, 0 }, /* (444) fill_opt ::= */ + { 410, -4 }, /* (445) fill_opt ::= FILL NK_LP fill_mode NK_RP */ + { 410, -6 }, /* (446) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ + { 415, -1 }, /* (447) fill_mode ::= NONE */ + { 415, -1 }, /* (448) fill_mode ::= PREV */ + { 415, -1 }, /* (449) fill_mode ::= NULL */ + { 415, -1 }, /* (450) fill_mode ::= LINEAR */ + { 415, -1 }, /* (451) fill_mode ::= NEXT */ + { 412, 0 }, /* (452) group_by_clause_opt ::= */ + { 412, -3 }, /* (453) group_by_clause_opt ::= GROUP BY group_by_list */ + { 416, -1 }, /* (454) group_by_list ::= expression */ + { 416, -3 }, /* (455) group_by_list ::= group_by_list NK_COMMA expression */ + { 413, 0 }, /* (456) having_clause_opt ::= */ + { 413, -2 }, /* (457) having_clause_opt ::= HAVING search_condition */ + { 408, 0 }, /* (458) range_opt ::= */ + { 408, -6 }, /* (459) range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP */ + { 409, 0 }, /* (460) every_opt ::= */ + { 409, -4 }, /* (461) every_opt ::= EVERY NK_LP duration_literal NK_RP */ + { 364, -4 }, /* (462) query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */ + { 417, -1 }, /* (463) query_expression_body ::= query_primary */ + { 417, -4 }, /* (464) query_expression_body ::= query_expression_body UNION ALL query_expression_body */ + { 417, -3 }, /* (465) query_expression_body ::= query_expression_body UNION query_expression_body */ + { 421, -1 }, /* (466) query_primary ::= query_specification */ + { 421, -6 }, /* (467) query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */ + { 418, 0 }, /* (468) order_by_clause_opt ::= */ + { 418, -3 }, /* (469) order_by_clause_opt ::= ORDER BY sort_specification_list */ + { 419, 0 }, /* (470) slimit_clause_opt ::= */ + { 419, -2 }, /* (471) slimit_clause_opt ::= SLIMIT NK_INTEGER */ + { 419, -4 }, /* (472) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ + { 419, -4 }, /* (473) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + { 420, 0 }, /* (474) limit_clause_opt ::= */ + { 420, -2 }, /* (475) limit_clause_opt ::= LIMIT NK_INTEGER */ + { 420, -4 }, /* (476) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ + { 420, -4 }, /* (477) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + { 382, -3 }, /* (478) subquery ::= NK_LP query_expression NK_RP */ + { 403, -1 }, /* (479) search_condition ::= common_expression */ + { 422, -1 }, /* (480) sort_specification_list ::= sort_specification */ + { 422, -3 }, /* (481) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ + { 423, -3 }, /* (482) sort_specification ::= expression ordering_specification_opt null_ordering_opt */ + { 424, 0 }, /* (483) ordering_specification_opt ::= */ + { 424, -1 }, /* (484) ordering_specification_opt ::= ASC */ + { 424, -1 }, /* (485) ordering_specification_opt ::= DESC */ + { 425, 0 }, /* (486) null_ordering_opt ::= */ + { 425, -2 }, /* (487) null_ordering_opt ::= NULLS FIRST */ + { 425, -2 }, /* (488) null_ordering_opt ::= NULLS LAST */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -3465,69 +3415,69 @@ static YYACTIONTYPE yy_reduce( yy_destructor(yypParser,308,&yymsp[0].minor); break; case 24: /* cmd ::= CREATE USER user_name PASS NK_STRING sysinfo_opt */ -{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-3].minor.yy617, &yymsp[-1].minor.yy0, yymsp[0].minor.yy215); } +{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-3].minor.yy209, &yymsp[-1].minor.yy0, yymsp[0].minor.yy59); } break; case 25: /* cmd ::= ALTER USER user_name PASS NK_STRING */ -{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy617, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy209, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); } break; case 26: /* cmd ::= ALTER USER user_name ENABLE NK_INTEGER */ -{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy617, TSDB_ALTER_USER_ENABLE, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy209, TSDB_ALTER_USER_ENABLE, &yymsp[0].minor.yy0); } break; case 27: /* cmd ::= ALTER USER user_name SYSINFO NK_INTEGER */ -{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy617, TSDB_ALTER_USER_SYSINFO, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy209, TSDB_ALTER_USER_SYSINFO, &yymsp[0].minor.yy0); } break; case 28: /* cmd ::= DROP USER user_name */ -{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy617); } +{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy209); } break; case 29: /* sysinfo_opt ::= */ -{ yymsp[1].minor.yy215 = 1; } +{ yymsp[1].minor.yy59 = 1; } break; case 30: /* sysinfo_opt ::= SYSINFO NK_INTEGER */ -{ yymsp[-1].minor.yy215 = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); } +{ yymsp[-1].minor.yy59 = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); } break; case 31: /* cmd ::= GRANT privileges ON priv_level TO user_name */ -{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy473, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617); } +{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy69, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209); } break; case 32: /* cmd ::= REVOKE privileges ON priv_level FROM user_name */ -{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy473, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617); } +{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy69, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209); } break; case 33: /* privileges ::= ALL */ -{ yymsp[0].minor.yy473 = PRIVILEGE_TYPE_ALL; } +{ yymsp[0].minor.yy69 = PRIVILEGE_TYPE_ALL; } break; case 34: /* privileges ::= priv_type_list */ case 35: /* priv_type_list ::= priv_type */ yytestcase(yyruleno==35); -{ yylhsminor.yy473 = yymsp[0].minor.yy473; } - yymsp[0].minor.yy473 = yylhsminor.yy473; +{ yylhsminor.yy69 = yymsp[0].minor.yy69; } + yymsp[0].minor.yy69 = yylhsminor.yy69; break; case 36: /* priv_type_list ::= priv_type_list NK_COMMA priv_type */ -{ yylhsminor.yy473 = yymsp[-2].minor.yy473 | yymsp[0].minor.yy473; } - yymsp[-2].minor.yy473 = yylhsminor.yy473; +{ yylhsminor.yy69 = yymsp[-2].minor.yy69 | yymsp[0].minor.yy69; } + yymsp[-2].minor.yy69 = yylhsminor.yy69; break; case 37: /* priv_type ::= READ */ -{ yymsp[0].minor.yy473 = PRIVILEGE_TYPE_READ; } +{ yymsp[0].minor.yy69 = PRIVILEGE_TYPE_READ; } break; case 38: /* priv_type ::= WRITE */ -{ yymsp[0].minor.yy473 = PRIVILEGE_TYPE_WRITE; } +{ yymsp[0].minor.yy69 = PRIVILEGE_TYPE_WRITE; } break; case 39: /* priv_level ::= NK_STAR NK_DOT NK_STAR */ -{ yylhsminor.yy617 = yymsp[-2].minor.yy0; } - yymsp[-2].minor.yy617 = yylhsminor.yy617; +{ yylhsminor.yy209 = yymsp[-2].minor.yy0; } + yymsp[-2].minor.yy209 = yylhsminor.yy209; break; case 40: /* priv_level ::= db_name NK_DOT NK_STAR */ -{ yylhsminor.yy617 = yymsp[-2].minor.yy617; } - yymsp[-2].minor.yy617 = yylhsminor.yy617; +{ yylhsminor.yy209 = yymsp[-2].minor.yy209; } + yymsp[-2].minor.yy209 = yylhsminor.yy209; break; case 41: /* cmd ::= CREATE DNODE dnode_endpoint */ -{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy617, NULL); } +{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy209, NULL); } break; case 42: /* cmd ::= CREATE DNODE dnode_endpoint PORT NK_INTEGER */ -{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy0); } break; case 43: /* cmd ::= DROP DNODE NK_INTEGER */ { pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy0); } break; case 44: /* cmd ::= DROP DNODE dnode_endpoint */ -{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy617); } +{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy209); } break; case 45: /* cmd ::= ALTER DNODE NK_INTEGER NK_STRING */ { pCxt->pRootNode = createAlterDnodeStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, NULL); } @@ -3544,31 +3494,31 @@ static YYACTIONTYPE yy_reduce( case 49: /* dnode_endpoint ::= NK_STRING */ case 50: /* dnode_endpoint ::= NK_ID */ yytestcase(yyruleno==50); case 51: /* dnode_endpoint ::= NK_IPTOKEN */ yytestcase(yyruleno==51); - case 314: /* db_name ::= NK_ID */ yytestcase(yyruleno==314); - case 315: /* table_name ::= NK_ID */ yytestcase(yyruleno==315); - case 316: /* column_name ::= NK_ID */ yytestcase(yyruleno==316); - case 317: /* function_name ::= NK_ID */ yytestcase(yyruleno==317); - case 318: /* table_alias ::= NK_ID */ yytestcase(yyruleno==318); - case 319: /* column_alias ::= NK_ID */ yytestcase(yyruleno==319); - case 320: /* user_name ::= NK_ID */ yytestcase(yyruleno==320); - case 321: /* topic_name ::= NK_ID */ yytestcase(yyruleno==321); - case 322: /* stream_name ::= NK_ID */ yytestcase(yyruleno==322); - case 323: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==323); - case 359: /* noarg_func ::= NOW */ yytestcase(yyruleno==359); - case 360: /* noarg_func ::= TODAY */ yytestcase(yyruleno==360); - case 361: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==361); - case 362: /* noarg_func ::= DATABASE */ yytestcase(yyruleno==362); - case 363: /* noarg_func ::= CLIENT_VERSION */ yytestcase(yyruleno==363); - case 364: /* noarg_func ::= SERVER_VERSION */ yytestcase(yyruleno==364); - case 365: /* noarg_func ::= SERVER_STATUS */ yytestcase(yyruleno==365); - case 366: /* noarg_func ::= CURRENT_USER */ yytestcase(yyruleno==366); - case 367: /* noarg_func ::= USER */ yytestcase(yyruleno==367); - case 368: /* star_func ::= COUNT */ yytestcase(yyruleno==368); - case 369: /* star_func ::= FIRST */ yytestcase(yyruleno==369); - case 370: /* star_func ::= LAST */ yytestcase(yyruleno==370); - case 371: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==371); -{ yylhsminor.yy617 = yymsp[0].minor.yy0; } - yymsp[0].minor.yy617 = yylhsminor.yy617; + case 312: /* db_name ::= NK_ID */ yytestcase(yyruleno==312); + case 313: /* table_name ::= NK_ID */ yytestcase(yyruleno==313); + case 314: /* column_name ::= NK_ID */ yytestcase(yyruleno==314); + case 315: /* function_name ::= NK_ID */ yytestcase(yyruleno==315); + case 316: /* table_alias ::= NK_ID */ yytestcase(yyruleno==316); + case 317: /* column_alias ::= NK_ID */ yytestcase(yyruleno==317); + case 318: /* user_name ::= NK_ID */ yytestcase(yyruleno==318); + case 319: /* topic_name ::= NK_ID */ yytestcase(yyruleno==319); + case 320: /* stream_name ::= NK_ID */ yytestcase(yyruleno==320); + case 321: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==321); + case 357: /* noarg_func ::= NOW */ yytestcase(yyruleno==357); + case 358: /* noarg_func ::= TODAY */ yytestcase(yyruleno==358); + case 359: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==359); + case 360: /* noarg_func ::= DATABASE */ yytestcase(yyruleno==360); + case 361: /* noarg_func ::= CLIENT_VERSION */ yytestcase(yyruleno==361); + case 362: /* noarg_func ::= SERVER_VERSION */ yytestcase(yyruleno==362); + case 363: /* noarg_func ::= SERVER_STATUS */ yytestcase(yyruleno==363); + case 364: /* noarg_func ::= CURRENT_USER */ yytestcase(yyruleno==364); + case 365: /* noarg_func ::= USER */ yytestcase(yyruleno==365); + case 366: /* star_func ::= COUNT */ yytestcase(yyruleno==366); + case 367: /* star_func ::= FIRST */ yytestcase(yyruleno==367); + case 368: /* star_func ::= LAST */ yytestcase(yyruleno==368); + case 369: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==369); +{ yylhsminor.yy209 = yymsp[0].minor.yy0; } + yymsp[0].minor.yy209 = yylhsminor.yy209; break; case 52: /* cmd ::= ALTER LOCAL NK_STRING */ { pCxt->pRootNode = createAlterLocalStmt(pCxt, &yymsp[0].minor.yy0, NULL); } @@ -3601,189 +3551,189 @@ static YYACTIONTYPE yy_reduce( { pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_MNODE_STMT, &yymsp[0].minor.yy0); } break; case 62: /* cmd ::= CREATE DATABASE not_exists_opt db_name db_options */ -{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy313, &yymsp[-1].minor.yy617, yymsp[0].minor.yy840); } +{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy293, &yymsp[-1].minor.yy209, yymsp[0].minor.yy272); } break; case 63: /* cmd ::= DROP DATABASE exists_opt db_name */ -{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy313, &yymsp[0].minor.yy617); } +{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy293, &yymsp[0].minor.yy209); } break; case 64: /* cmd ::= USE db_name */ -{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy617); } +{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy209); } break; case 65: /* cmd ::= ALTER DATABASE db_name alter_db_options */ -{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy617, yymsp[0].minor.yy840); } +{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy209, yymsp[0].minor.yy272); } break; case 66: /* cmd ::= FLUSH DATABASE db_name */ -{ pCxt->pRootNode = createFlushDatabaseStmt(pCxt, &yymsp[0].minor.yy617); } +{ pCxt->pRootNode = createFlushDatabaseStmt(pCxt, &yymsp[0].minor.yy209); } break; case 67: /* cmd ::= TRIM DATABASE db_name */ -{ pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &yymsp[0].minor.yy617); } +{ pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &yymsp[0].minor.yy209); } break; case 68: /* not_exists_opt ::= IF NOT EXISTS */ -{ yymsp[-2].minor.yy313 = true; } +{ yymsp[-2].minor.yy293 = true; } break; case 69: /* not_exists_opt ::= */ case 71: /* exists_opt ::= */ yytestcase(yyruleno==71); case 255: /* analyze_opt ::= */ yytestcase(yyruleno==255); case 262: /* agg_func_opt ::= */ yytestcase(yyruleno==262); - case 425: /* set_quantifier_opt ::= */ yytestcase(yyruleno==425); -{ yymsp[1].minor.yy313 = false; } + case 423: /* set_quantifier_opt ::= */ yytestcase(yyruleno==423); +{ yymsp[1].minor.yy293 = false; } break; case 70: /* exists_opt ::= IF EXISTS */ -{ yymsp[-1].minor.yy313 = true; } +{ yymsp[-1].minor.yy293 = true; } break; case 72: /* db_options ::= */ -{ yymsp[1].minor.yy840 = createDefaultDatabaseOptions(pCxt); } +{ yymsp[1].minor.yy272 = createDefaultDatabaseOptions(pCxt); } break; case 73: /* db_options ::= db_options BUFFER NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 74: /* db_options ::= db_options CACHEMODEL NK_STRING */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_CACHEMODEL, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_CACHEMODEL, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 75: /* db_options ::= db_options CACHESIZE NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_CACHESIZE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_CACHESIZE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 76: /* db_options ::= db_options COMP NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_COMP, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_COMP, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 77: /* db_options ::= db_options DURATION NK_INTEGER */ case 78: /* db_options ::= db_options DURATION NK_VARIABLE */ yytestcase(yyruleno==78); -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_DAYS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_DAYS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 79: /* db_options ::= db_options MAXROWS NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 80: /* db_options ::= db_options MINROWS NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 81: /* db_options ::= db_options KEEP integer_list */ case 82: /* db_options ::= db_options KEEP variable_list */ yytestcase(yyruleno==82); -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_KEEP, yymsp[0].minor.yy544); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_KEEP, yymsp[0].minor.yy172); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 83: /* db_options ::= db_options PAGES NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_PAGES, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_PAGES, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 84: /* db_options ::= db_options PAGESIZE NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 85: /* db_options ::= db_options PRECISION NK_STRING */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 86: /* db_options ::= db_options REPLICA NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 87: /* db_options ::= db_options STRICT NK_STRING */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_STRICT, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_STRICT, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 88: /* db_options ::= db_options VGROUPS NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 89: /* db_options ::= db_options SINGLE_STABLE NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 90: /* db_options ::= db_options RETENTIONS retention_list */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_RETENTIONS, yymsp[0].minor.yy544); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_RETENTIONS, yymsp[0].minor.yy172); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 91: /* db_options ::= db_options SCHEMALESS NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 92: /* db_options ::= db_options WAL_LEVEL NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_WAL, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_WAL, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 93: /* db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 94: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_WAL_RETENTION_PERIOD, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_WAL_RETENTION_PERIOD, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 95: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */ { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-3].minor.yy840, DB_OPTION_WAL_RETENTION_PERIOD, &t); + yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-3].minor.yy272, DB_OPTION_WAL_RETENTION_PERIOD, &t); } - yymsp[-3].minor.yy840 = yylhsminor.yy840; + yymsp[-3].minor.yy272 = yylhsminor.yy272; break; case 96: /* db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_WAL_RETENTION_SIZE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_WAL_RETENTION_SIZE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 97: /* db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */ { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-3].minor.yy840, DB_OPTION_WAL_RETENTION_SIZE, &t); + yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-3].minor.yy272, DB_OPTION_WAL_RETENTION_SIZE, &t); } - yymsp[-3].minor.yy840 = yylhsminor.yy840; + yymsp[-3].minor.yy272 = yylhsminor.yy272; break; case 98: /* db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_WAL_ROLL_PERIOD, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_WAL_ROLL_PERIOD, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 99: /* db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER */ -{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_WAL_SEGMENT_SIZE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_WAL_SEGMENT_SIZE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 100: /* alter_db_options ::= alter_db_option */ -{ yylhsminor.yy840 = createAlterDatabaseOptions(pCxt); yylhsminor.yy840 = setAlterDatabaseOption(pCxt, yylhsminor.yy840, &yymsp[0].minor.yy95); } - yymsp[0].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createAlterDatabaseOptions(pCxt); yylhsminor.yy272 = setAlterDatabaseOption(pCxt, yylhsminor.yy272, &yymsp[0].minor.yy5); } + yymsp[0].minor.yy272 = yylhsminor.yy272; break; case 101: /* alter_db_options ::= alter_db_options alter_db_option */ -{ yylhsminor.yy840 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy840, &yymsp[0].minor.yy95); } - yymsp[-1].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy272, &yymsp[0].minor.yy5); } + yymsp[-1].minor.yy272 = yylhsminor.yy272; break; case 102: /* alter_db_option ::= CACHEMODEL NK_STRING */ -{ yymsp[-1].minor.yy95.type = DB_OPTION_CACHEMODEL; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy5.type = DB_OPTION_CACHEMODEL; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; } break; case 103: /* alter_db_option ::= CACHESIZE NK_INTEGER */ -{ yymsp[-1].minor.yy95.type = DB_OPTION_CACHESIZE; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy5.type = DB_OPTION_CACHESIZE; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; } break; case 104: /* alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER */ -{ yymsp[-1].minor.yy95.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy5.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; } break; case 105: /* alter_db_option ::= KEEP integer_list */ case 106: /* alter_db_option ::= KEEP variable_list */ yytestcase(yyruleno==106); -{ yymsp[-1].minor.yy95.type = DB_OPTION_KEEP; yymsp[-1].minor.yy95.pList = yymsp[0].minor.yy544; } +{ yymsp[-1].minor.yy5.type = DB_OPTION_KEEP; yymsp[-1].minor.yy5.pList = yymsp[0].minor.yy172; } break; case 107: /* alter_db_option ::= WAL_LEVEL NK_INTEGER */ -{ yymsp[-1].minor.yy95.type = DB_OPTION_WAL; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy5.type = DB_OPTION_WAL; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; } break; case 108: /* integer_list ::= NK_INTEGER */ -{ yylhsminor.yy544 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy544 = yylhsminor.yy544; +{ yylhsminor.yy172 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy172 = yylhsminor.yy172; break; case 109: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */ - case 284: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==284); -{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-2].minor.yy544, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy544 = yylhsminor.yy544; + case 282: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==282); +{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-2].minor.yy172, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } + yymsp[-2].minor.yy172 = yylhsminor.yy172; break; case 110: /* variable_list ::= NK_VARIABLE */ -{ yylhsminor.yy544 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy544 = yylhsminor.yy544; +{ yylhsminor.yy172 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy172 = yylhsminor.yy172; break; case 111: /* variable_list ::= variable_list NK_COMMA NK_VARIABLE */ -{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-2].minor.yy544, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy544 = yylhsminor.yy544; +{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-2].minor.yy172, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[-2].minor.yy172 = yylhsminor.yy172; break; case 112: /* retention_list ::= retention */ case 132: /* multi_create_clause ::= create_subtable_clause */ yytestcase(yyruleno==132); @@ -3792,266 +3742,266 @@ static YYACTIONTYPE yy_reduce( case 185: /* rollup_func_list ::= rollup_func_name */ yytestcase(yyruleno==185); case 190: /* col_name_list ::= col_name */ yytestcase(yyruleno==190); case 238: /* func_list ::= func */ yytestcase(yyruleno==238); - case 312: /* literal_list ::= signed_literal */ yytestcase(yyruleno==312); - case 374: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==374); - case 428: /* select_list ::= select_item */ yytestcase(yyruleno==428); - case 482: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==482); -{ yylhsminor.yy544 = createNodeList(pCxt, yymsp[0].minor.yy840); } - yymsp[0].minor.yy544 = yylhsminor.yy544; + case 310: /* literal_list ::= signed_literal */ yytestcase(yyruleno==310); + case 372: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==372); + case 426: /* select_list ::= select_item */ yytestcase(yyruleno==426); + case 480: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==480); +{ yylhsminor.yy172 = createNodeList(pCxt, yymsp[0].minor.yy272); } + yymsp[0].minor.yy172 = yylhsminor.yy172; break; case 113: /* retention_list ::= retention_list NK_COMMA retention */ case 143: /* column_def_list ::= column_def_list NK_COMMA column_def */ yytestcase(yyruleno==143); case 186: /* rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name */ yytestcase(yyruleno==186); case 191: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==191); case 239: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==239); - case 313: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==313); - case 375: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==375); - case 429: /* select_list ::= select_list NK_COMMA select_item */ yytestcase(yyruleno==429); - case 483: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==483); -{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-2].minor.yy544, yymsp[0].minor.yy840); } - yymsp[-2].minor.yy544 = yylhsminor.yy544; + case 311: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==311); + case 373: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==373); + case 427: /* select_list ::= select_list NK_COMMA select_item */ yytestcase(yyruleno==427); + case 481: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==481); +{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-2].minor.yy172, yymsp[0].minor.yy272); } + yymsp[-2].minor.yy172 = yylhsminor.yy172; break; case 114: /* retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */ -{ yylhsminor.yy840 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 115: /* cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */ case 117: /* cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ yytestcase(yyruleno==117); -{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy313, yymsp[-5].minor.yy840, yymsp[-3].minor.yy544, yymsp[-1].minor.yy544, yymsp[0].minor.yy840); } +{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy293, yymsp[-5].minor.yy272, yymsp[-3].minor.yy172, yymsp[-1].minor.yy172, yymsp[0].minor.yy272); } break; case 116: /* cmd ::= CREATE TABLE multi_create_clause */ -{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy544); } +{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy172); } break; case 118: /* cmd ::= DROP TABLE multi_drop_clause */ -{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy544); } +{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy172); } break; case 119: /* cmd ::= DROP STABLE exists_opt full_table_name */ -{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy313, yymsp[0].minor.yy840); } +{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy293, yymsp[0].minor.yy272); } break; case 120: /* cmd ::= ALTER TABLE alter_table_clause */ - case 286: /* cmd ::= query_expression */ yytestcase(yyruleno==286); -{ pCxt->pRootNode = yymsp[0].minor.yy840; } + case 284: /* cmd ::= query_expression */ yytestcase(yyruleno==284); +{ pCxt->pRootNode = yymsp[0].minor.yy272; } break; case 121: /* cmd ::= ALTER STABLE alter_table_clause */ -{ pCxt->pRootNode = setAlterSuperTableType(yymsp[0].minor.yy840); } +{ pCxt->pRootNode = setAlterSuperTableType(yymsp[0].minor.yy272); } break; case 122: /* alter_table_clause ::= full_table_name alter_table_options */ -{ yylhsminor.yy840 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy840, yymsp[0].minor.yy840); } - yymsp[-1].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy272, yymsp[0].minor.yy272); } + yymsp[-1].minor.yy272 = yylhsminor.yy272; break; case 123: /* alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */ -{ yylhsminor.yy840 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy617, yymsp[0].minor.yy784); } - yymsp[-4].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy209, yymsp[0].minor.yy616); } + yymsp[-4].minor.yy272 = yylhsminor.yy272; break; case 124: /* alter_table_clause ::= full_table_name DROP COLUMN column_name */ -{ yylhsminor.yy840 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy840, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy617); } - yymsp[-3].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy272, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy209); } + yymsp[-3].minor.yy272 = yylhsminor.yy272; break; case 125: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */ -{ yylhsminor.yy840 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy617, yymsp[0].minor.yy784); } - yymsp[-4].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy209, yymsp[0].minor.yy616); } + yymsp[-4].minor.yy272 = yylhsminor.yy272; break; case 126: /* alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */ -{ yylhsminor.yy840 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy617, &yymsp[0].minor.yy617); } - yymsp[-4].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy209, &yymsp[0].minor.yy209); } + yymsp[-4].minor.yy272 = yylhsminor.yy272; break; case 127: /* alter_table_clause ::= full_table_name ADD TAG column_name type_name */ -{ yylhsminor.yy840 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy617, yymsp[0].minor.yy784); } - yymsp[-4].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy209, yymsp[0].minor.yy616); } + yymsp[-4].minor.yy272 = yylhsminor.yy272; break; case 128: /* alter_table_clause ::= full_table_name DROP TAG column_name */ -{ yylhsminor.yy840 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy840, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy617); } - yymsp[-3].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy272, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy209); } + yymsp[-3].minor.yy272 = yylhsminor.yy272; break; case 129: /* alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */ -{ yylhsminor.yy840 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy617, yymsp[0].minor.yy784); } - yymsp[-4].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy209, yymsp[0].minor.yy616); } + yymsp[-4].minor.yy272 = yylhsminor.yy272; break; case 130: /* alter_table_clause ::= full_table_name RENAME TAG column_name column_name */ -{ yylhsminor.yy840 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy617, &yymsp[0].minor.yy617); } - yymsp[-4].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy209, &yymsp[0].minor.yy209); } + yymsp[-4].minor.yy272 = yylhsminor.yy272; break; case 131: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */ -{ yylhsminor.yy840 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy840, &yymsp[-2].minor.yy617, yymsp[0].minor.yy840); } - yymsp[-5].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy272, &yymsp[-2].minor.yy209, yymsp[0].minor.yy272); } + yymsp[-5].minor.yy272 = yylhsminor.yy272; break; case 133: /* multi_create_clause ::= multi_create_clause create_subtable_clause */ case 136: /* multi_drop_clause ::= multi_drop_clause drop_table_clause */ yytestcase(yyruleno==136); -{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-1].minor.yy544, yymsp[0].minor.yy840); } - yymsp[-1].minor.yy544 = yylhsminor.yy544; +{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-1].minor.yy172, yymsp[0].minor.yy272); } + yymsp[-1].minor.yy172 = yylhsminor.yy172; break; case 134: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options */ -{ yylhsminor.yy840 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy313, yymsp[-8].minor.yy840, yymsp[-6].minor.yy840, yymsp[-5].minor.yy544, yymsp[-2].minor.yy544, yymsp[0].minor.yy840); } - yymsp[-9].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy293, yymsp[-8].minor.yy272, yymsp[-6].minor.yy272, yymsp[-5].minor.yy172, yymsp[-2].minor.yy172, yymsp[0].minor.yy272); } + yymsp[-9].minor.yy272 = yylhsminor.yy272; break; case 137: /* drop_table_clause ::= exists_opt full_table_name */ -{ yylhsminor.yy840 = createDropTableClause(pCxt, yymsp[-1].minor.yy313, yymsp[0].minor.yy840); } - yymsp[-1].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createDropTableClause(pCxt, yymsp[-1].minor.yy293, yymsp[0].minor.yy272); } + yymsp[-1].minor.yy272 = yylhsminor.yy272; break; case 138: /* specific_cols_opt ::= */ case 169: /* tags_def_opt ::= */ yytestcase(yyruleno==169); - case 437: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==437); - case 454: /* group_by_clause_opt ::= */ yytestcase(yyruleno==454); - case 470: /* order_by_clause_opt ::= */ yytestcase(yyruleno==470); -{ yymsp[1].minor.yy544 = NULL; } + case 435: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==435); + case 452: /* group_by_clause_opt ::= */ yytestcase(yyruleno==452); + case 468: /* order_by_clause_opt ::= */ yytestcase(yyruleno==468); +{ yymsp[1].minor.yy172 = NULL; } break; case 139: /* specific_cols_opt ::= NK_LP col_name_list NK_RP */ -{ yymsp[-2].minor.yy544 = yymsp[-1].minor.yy544; } +{ yymsp[-2].minor.yy172 = yymsp[-1].minor.yy172; } break; case 140: /* full_table_name ::= table_name */ -{ yylhsminor.yy840 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy617, NULL); } - yymsp[0].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy209, NULL); } + yymsp[0].minor.yy272 = yylhsminor.yy272; break; case 141: /* full_table_name ::= db_name NK_DOT table_name */ -{ yylhsminor.yy840 = createRealTableNode(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617, NULL); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createRealTableNode(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209, NULL); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 144: /* column_def ::= column_name type_name */ -{ yylhsminor.yy840 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy617, yymsp[0].minor.yy784, NULL); } - yymsp[-1].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy209, yymsp[0].minor.yy616, NULL); } + yymsp[-1].minor.yy272 = yylhsminor.yy272; break; case 145: /* column_def ::= column_name type_name COMMENT NK_STRING */ -{ yylhsminor.yy840 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy617, yymsp[-2].minor.yy784, &yymsp[0].minor.yy0); } - yymsp[-3].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy209, yymsp[-2].minor.yy616, &yymsp[0].minor.yy0); } + yymsp[-3].minor.yy272 = yylhsminor.yy272; break; case 146: /* type_name ::= BOOL */ -{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_BOOL); } +{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_BOOL); } break; case 147: /* type_name ::= TINYINT */ -{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_TINYINT); } +{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_TINYINT); } break; case 148: /* type_name ::= SMALLINT */ -{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_SMALLINT); } +{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_SMALLINT); } break; case 149: /* type_name ::= INT */ case 150: /* type_name ::= INTEGER */ yytestcase(yyruleno==150); -{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_INT); } +{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_INT); } break; case 151: /* type_name ::= BIGINT */ -{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_BIGINT); } +{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_BIGINT); } break; case 152: /* type_name ::= FLOAT */ -{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_FLOAT); } +{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_FLOAT); } break; case 153: /* type_name ::= DOUBLE */ -{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_DOUBLE); } +{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_DOUBLE); } break; case 154: /* type_name ::= BINARY NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy784 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); } +{ yymsp[-3].minor.yy616 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); } break; case 155: /* type_name ::= TIMESTAMP */ -{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); } +{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); } break; case 156: /* type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy784 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); } +{ yymsp[-3].minor.yy616 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); } break; case 157: /* type_name ::= TINYINT UNSIGNED */ -{ yymsp[-1].minor.yy784 = createDataType(TSDB_DATA_TYPE_UTINYINT); } +{ yymsp[-1].minor.yy616 = createDataType(TSDB_DATA_TYPE_UTINYINT); } break; case 158: /* type_name ::= SMALLINT UNSIGNED */ -{ yymsp[-1].minor.yy784 = createDataType(TSDB_DATA_TYPE_USMALLINT); } +{ yymsp[-1].minor.yy616 = createDataType(TSDB_DATA_TYPE_USMALLINT); } break; case 159: /* type_name ::= INT UNSIGNED */ -{ yymsp[-1].minor.yy784 = createDataType(TSDB_DATA_TYPE_UINT); } +{ yymsp[-1].minor.yy616 = createDataType(TSDB_DATA_TYPE_UINT); } break; case 160: /* type_name ::= BIGINT UNSIGNED */ -{ yymsp[-1].minor.yy784 = createDataType(TSDB_DATA_TYPE_UBIGINT); } +{ yymsp[-1].minor.yy616 = createDataType(TSDB_DATA_TYPE_UBIGINT); } break; case 161: /* type_name ::= JSON */ -{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_JSON); } +{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_JSON); } break; case 162: /* type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy784 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); } +{ yymsp[-3].minor.yy616 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); } break; case 163: /* type_name ::= MEDIUMBLOB */ -{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); } +{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); } break; case 164: /* type_name ::= BLOB */ -{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_BLOB); } +{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_BLOB); } break; case 165: /* type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy784 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); } +{ yymsp[-3].minor.yy616 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); } break; case 166: /* type_name ::= DECIMAL */ -{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_DECIMAL); } +{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_DECIMAL); } break; case 167: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy784 = createDataType(TSDB_DATA_TYPE_DECIMAL); } +{ yymsp[-3].minor.yy616 = createDataType(TSDB_DATA_TYPE_DECIMAL); } break; case 168: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */ -{ yymsp[-5].minor.yy784 = createDataType(TSDB_DATA_TYPE_DECIMAL); } +{ yymsp[-5].minor.yy616 = createDataType(TSDB_DATA_TYPE_DECIMAL); } break; case 170: /* tags_def_opt ::= tags_def */ - case 373: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==373); -{ yylhsminor.yy544 = yymsp[0].minor.yy544; } - yymsp[0].minor.yy544 = yylhsminor.yy544; + case 371: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==371); +{ yylhsminor.yy172 = yymsp[0].minor.yy172; } + yymsp[0].minor.yy172 = yylhsminor.yy172; break; case 171: /* tags_def ::= TAGS NK_LP column_def_list NK_RP */ -{ yymsp[-3].minor.yy544 = yymsp[-1].minor.yy544; } +{ yymsp[-3].minor.yy172 = yymsp[-1].minor.yy172; } break; case 172: /* table_options ::= */ -{ yymsp[1].minor.yy840 = createDefaultTableOptions(pCxt); } +{ yymsp[1].minor.yy272 = createDefaultTableOptions(pCxt); } break; case 173: /* table_options ::= table_options COMMENT NK_STRING */ -{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-2].minor.yy840, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-2].minor.yy272, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 174: /* table_options ::= table_options MAX_DELAY duration_list */ -{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-2].minor.yy840, TABLE_OPTION_MAXDELAY, yymsp[0].minor.yy544); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-2].minor.yy272, TABLE_OPTION_MAXDELAY, yymsp[0].minor.yy172); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 175: /* table_options ::= table_options WATERMARK duration_list */ -{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-2].minor.yy840, TABLE_OPTION_WATERMARK, yymsp[0].minor.yy544); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-2].minor.yy272, TABLE_OPTION_WATERMARK, yymsp[0].minor.yy172); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 176: /* table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP */ -{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-4].minor.yy840, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy544); } - yymsp[-4].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-4].minor.yy272, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy172); } + yymsp[-4].minor.yy272 = yylhsminor.yy272; break; case 177: /* table_options ::= table_options TTL NK_INTEGER */ -{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-2].minor.yy840, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-2].minor.yy272, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 178: /* table_options ::= table_options SMA NK_LP col_name_list NK_RP */ -{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-4].minor.yy840, TABLE_OPTION_SMA, yymsp[-1].minor.yy544); } - yymsp[-4].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-4].minor.yy272, TABLE_OPTION_SMA, yymsp[-1].minor.yy172); } + yymsp[-4].minor.yy272 = yylhsminor.yy272; break; case 179: /* alter_table_options ::= alter_table_option */ -{ yylhsminor.yy840 = createAlterTableOptions(pCxt); yylhsminor.yy840 = setTableOption(pCxt, yylhsminor.yy840, yymsp[0].minor.yy95.type, &yymsp[0].minor.yy95.val); } - yymsp[0].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createAlterTableOptions(pCxt); yylhsminor.yy272 = setTableOption(pCxt, yylhsminor.yy272, yymsp[0].minor.yy5.type, &yymsp[0].minor.yy5.val); } + yymsp[0].minor.yy272 = yylhsminor.yy272; break; case 180: /* alter_table_options ::= alter_table_options alter_table_option */ -{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-1].minor.yy840, yymsp[0].minor.yy95.type, &yymsp[0].minor.yy95.val); } - yymsp[-1].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-1].minor.yy272, yymsp[0].minor.yy5.type, &yymsp[0].minor.yy5.val); } + yymsp[-1].minor.yy272 = yylhsminor.yy272; break; case 181: /* alter_table_option ::= COMMENT NK_STRING */ -{ yymsp[-1].minor.yy95.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy5.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; } break; case 182: /* alter_table_option ::= TTL NK_INTEGER */ -{ yymsp[-1].minor.yy95.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy5.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; } break; case 183: /* duration_list ::= duration_literal */ - case 340: /* expression_list ::= expression */ yytestcase(yyruleno==340); -{ yylhsminor.yy544 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy840)); } - yymsp[0].minor.yy544 = yylhsminor.yy544; + case 338: /* expression_list ::= expression */ yytestcase(yyruleno==338); +{ yylhsminor.yy172 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy272)); } + yymsp[0].minor.yy172 = yylhsminor.yy172; break; case 184: /* duration_list ::= duration_list NK_COMMA duration_literal */ - case 341: /* expression_list ::= expression_list NK_COMMA expression */ yytestcase(yyruleno==341); -{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-2].minor.yy544, releaseRawExprNode(pCxt, yymsp[0].minor.yy840)); } - yymsp[-2].minor.yy544 = yylhsminor.yy544; + case 339: /* expression_list ::= expression_list NK_COMMA expression */ yytestcase(yyruleno==339); +{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-2].minor.yy172, releaseRawExprNode(pCxt, yymsp[0].minor.yy272)); } + yymsp[-2].minor.yy172 = yylhsminor.yy172; break; case 187: /* rollup_func_name ::= function_name */ -{ yylhsminor.yy840 = createFunctionNode(pCxt, &yymsp[0].minor.yy617, NULL); } - yymsp[0].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createFunctionNode(pCxt, &yymsp[0].minor.yy209, NULL); } + yymsp[0].minor.yy272 = yylhsminor.yy272; break; case 188: /* rollup_func_name ::= FIRST */ case 189: /* rollup_func_name ::= LAST */ yytestcase(yyruleno==189); -{ yylhsminor.yy840 = createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL); } - yymsp[0].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL); } + yymsp[0].minor.yy272 = yylhsminor.yy272; break; case 192: /* col_name ::= column_name */ -{ yylhsminor.yy840 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy617); } - yymsp[0].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy209); } + yymsp[0].minor.yy272 = yylhsminor.yy272; break; case 193: /* cmd ::= SHOW DNODES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DNODES_STMT); } @@ -4063,13 +4013,13 @@ static YYACTIONTYPE yy_reduce( { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DATABASES_STMT); } break; case 196: /* cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */ -{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy840, yymsp[0].minor.yy840, OP_TYPE_LIKE); } +{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy272, yymsp[0].minor.yy272, OP_TYPE_LIKE); } break; case 197: /* cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */ -{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy840, yymsp[0].minor.yy840, OP_TYPE_LIKE); } +{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy272, yymsp[0].minor.yy272, OP_TYPE_LIKE); } break; case 198: /* cmd ::= SHOW db_name_cond_opt VGROUPS */ -{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy840, NULL, OP_TYPE_LIKE); } +{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy272, NULL, OP_TYPE_LIKE); } break; case 199: /* cmd ::= SHOW MNODES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MNODES_STMT); } @@ -4084,7 +4034,7 @@ static YYACTIONTYPE yy_reduce( { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_FUNCTIONS_STMT); } break; case 203: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */ -{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[0].minor.yy840, yymsp[-1].minor.yy840, OP_TYPE_EQUAL); } +{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[0].minor.yy272, yymsp[-1].minor.yy272, OP_TYPE_EQUAL); } break; case 204: /* cmd ::= SHOW STREAMS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STREAMS_STMT); } @@ -4103,13 +4053,13 @@ static YYACTIONTYPE yy_reduce( { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LICENCES_STMT); } break; case 210: /* cmd ::= SHOW CREATE DATABASE db_name */ -{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy617); } +{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy209); } break; case 211: /* cmd ::= SHOW CREATE TABLE full_table_name */ -{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy840); } +{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy272); } break; case 212: /* cmd ::= SHOW CREATE STABLE full_table_name */ -{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy840); } +{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy272); } break; case 213: /* cmd ::= SHOW QUERIES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QUERIES_STMT); } @@ -4142,7 +4092,7 @@ static YYACTIONTYPE yy_reduce( { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TRANSACTIONS_STMT); } break; case 223: /* cmd ::= SHOW TABLE DISTRIBUTED full_table_name */ -{ pCxt->pRootNode = createShowTableDistributedStmt(pCxt, yymsp[0].minor.yy840); } +{ pCxt->pRootNode = createShowTableDistributedStmt(pCxt, yymsp[0].minor.yy272); } break; case 224: /* cmd ::= SHOW CONSUMERS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CONSUMERS_STMT); } @@ -4151,713 +4101,711 @@ static YYACTIONTYPE yy_reduce( { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT); } break; case 226: /* cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */ -{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, yymsp[0].minor.yy840, yymsp[-1].minor.yy840, OP_TYPE_EQUAL); } +{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, yymsp[0].minor.yy272, yymsp[-1].minor.yy272, OP_TYPE_EQUAL); } break; case 227: /* db_name_cond_opt ::= */ case 232: /* from_db_opt ::= */ yytestcase(yyruleno==232); -{ yymsp[1].minor.yy840 = createDefaultDatabaseCondValue(pCxt); } +{ yymsp[1].minor.yy272 = createDefaultDatabaseCondValue(pCxt); } break; case 228: /* db_name_cond_opt ::= db_name NK_DOT */ -{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy617); } - yymsp[-1].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy209); } + yymsp[-1].minor.yy272 = yylhsminor.yy272; break; case 229: /* like_pattern_opt ::= */ - case 268: /* into_opt ::= */ yytestcase(yyruleno==268); - case 406: /* from_clause_opt ::= */ yytestcase(yyruleno==406); - case 435: /* where_clause_opt ::= */ yytestcase(yyruleno==435); - case 439: /* twindow_clause_opt ::= */ yytestcase(yyruleno==439); - case 444: /* sliding_opt ::= */ yytestcase(yyruleno==444); - case 446: /* fill_opt ::= */ yytestcase(yyruleno==446); - case 458: /* having_clause_opt ::= */ yytestcase(yyruleno==458); - case 460: /* range_opt ::= */ yytestcase(yyruleno==460); - case 462: /* every_opt ::= */ yytestcase(yyruleno==462); - case 472: /* slimit_clause_opt ::= */ yytestcase(yyruleno==472); - case 476: /* limit_clause_opt ::= */ yytestcase(yyruleno==476); -{ yymsp[1].minor.yy840 = NULL; } + case 404: /* from_clause_opt ::= */ yytestcase(yyruleno==404); + case 433: /* where_clause_opt ::= */ yytestcase(yyruleno==433); + case 437: /* twindow_clause_opt ::= */ yytestcase(yyruleno==437); + case 442: /* sliding_opt ::= */ yytestcase(yyruleno==442); + case 444: /* fill_opt ::= */ yytestcase(yyruleno==444); + case 456: /* having_clause_opt ::= */ yytestcase(yyruleno==456); + case 458: /* range_opt ::= */ yytestcase(yyruleno==458); + case 460: /* every_opt ::= */ yytestcase(yyruleno==460); + case 470: /* slimit_clause_opt ::= */ yytestcase(yyruleno==470); + case 474: /* limit_clause_opt ::= */ yytestcase(yyruleno==474); +{ yymsp[1].minor.yy272 = NULL; } break; case 230: /* like_pattern_opt ::= LIKE NK_STRING */ -{ yymsp[-1].minor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } +{ yymsp[-1].minor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } break; case 231: /* table_name_cond ::= table_name */ -{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy617); } - yymsp[0].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy209); } + yymsp[0].minor.yy272 = yylhsminor.yy272; break; case 233: /* from_db_opt ::= FROM db_name */ -{ yymsp[-1].minor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy617); } +{ yymsp[-1].minor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy209); } break; case 234: /* cmd ::= CREATE SMA INDEX not_exists_opt full_table_name ON full_table_name index_options */ -{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy313, yymsp[-3].minor.yy840, yymsp[-1].minor.yy840, NULL, yymsp[0].minor.yy840); } +{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy293, yymsp[-3].minor.yy272, yymsp[-1].minor.yy272, NULL, yymsp[0].minor.yy272); } break; case 235: /* cmd ::= DROP INDEX exists_opt full_table_name */ -{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-1].minor.yy313, yymsp[0].minor.yy840); } +{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-1].minor.yy293, yymsp[0].minor.yy272); } break; case 236: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt */ -{ yymsp[-9].minor.yy840 = createIndexOption(pCxt, yymsp[-7].minor.yy544, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), NULL, yymsp[-1].minor.yy840, yymsp[0].minor.yy840); } +{ yymsp[-9].minor.yy272 = createIndexOption(pCxt, yymsp[-7].minor.yy172, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), NULL, yymsp[-1].minor.yy272, yymsp[0].minor.yy272); } break; case 237: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */ -{ yymsp[-11].minor.yy840 = createIndexOption(pCxt, yymsp[-9].minor.yy544, releaseRawExprNode(pCxt, yymsp[-5].minor.yy840), releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), yymsp[-1].minor.yy840, yymsp[0].minor.yy840); } +{ yymsp[-11].minor.yy272 = createIndexOption(pCxt, yymsp[-9].minor.yy172, releaseRawExprNode(pCxt, yymsp[-5].minor.yy272), releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), yymsp[-1].minor.yy272, yymsp[0].minor.yy272); } break; case 240: /* func ::= function_name NK_LP expression_list NK_RP */ -{ yylhsminor.yy840 = createFunctionNode(pCxt, &yymsp[-3].minor.yy617, yymsp[-1].minor.yy544); } - yymsp[-3].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = createFunctionNode(pCxt, &yymsp[-3].minor.yy209, yymsp[-1].minor.yy172); } + yymsp[-3].minor.yy272 = yylhsminor.yy272; break; case 241: /* sma_stream_opt ::= */ - case 270: /* stream_options ::= */ yytestcase(yyruleno==270); -{ yymsp[1].minor.yy840 = createStreamOptions(pCxt); } + case 268: /* stream_options ::= */ yytestcase(yyruleno==268); +{ yymsp[1].minor.yy272 = createStreamOptions(pCxt); } break; case 242: /* sma_stream_opt ::= stream_options WATERMARK duration_literal */ - case 274: /* stream_options ::= stream_options WATERMARK duration_literal */ yytestcase(yyruleno==274); -{ ((SStreamOptions*)yymsp[-2].minor.yy840)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy840); yylhsminor.yy840 = yymsp[-2].minor.yy840; } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + case 272: /* stream_options ::= stream_options WATERMARK duration_literal */ yytestcase(yyruleno==272); +{ ((SStreamOptions*)yymsp[-2].minor.yy272)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy272); yylhsminor.yy272 = yymsp[-2].minor.yy272; } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 243: /* sma_stream_opt ::= stream_options MAX_DELAY duration_literal */ -{ ((SStreamOptions*)yymsp[-2].minor.yy840)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy840); yylhsminor.yy840 = yymsp[-2].minor.yy840; } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ ((SStreamOptions*)yymsp[-2].minor.yy272)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy272); yylhsminor.yy272 = yymsp[-2].minor.yy272; } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 244: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression */ -{ pCxt->pRootNode = createCreateTopicStmtUseQuery(pCxt, yymsp[-3].minor.yy313, &yymsp[-2].minor.yy617, yymsp[0].minor.yy840); } +{ pCxt->pRootNode = createCreateTopicStmtUseQuery(pCxt, yymsp[-3].minor.yy293, &yymsp[-2].minor.yy209, yymsp[0].minor.yy272); } break; case 245: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */ -{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-4].minor.yy313, &yymsp[-3].minor.yy617, &yymsp[0].minor.yy617, false); } +{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-4].minor.yy293, &yymsp[-3].minor.yy209, &yymsp[0].minor.yy209, false); } break; case 246: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name */ -{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-6].minor.yy313, &yymsp[-5].minor.yy617, &yymsp[0].minor.yy617, true); } +{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-6].minor.yy293, &yymsp[-5].minor.yy209, &yymsp[0].minor.yy209, true); } break; case 247: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */ -{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-4].minor.yy313, &yymsp[-3].minor.yy617, yymsp[0].minor.yy840, false); } +{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-4].minor.yy293, &yymsp[-3].minor.yy209, yymsp[0].minor.yy272, false); } break; case 248: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name */ -{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-6].minor.yy313, &yymsp[-5].minor.yy617, yymsp[0].minor.yy840, true); } +{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-6].minor.yy293, &yymsp[-5].minor.yy209, yymsp[0].minor.yy272, true); } break; case 249: /* cmd ::= DROP TOPIC exists_opt topic_name */ -{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy313, &yymsp[0].minor.yy617); } +{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy293, &yymsp[0].minor.yy209); } break; case 250: /* cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */ -{ pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy313, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617); } +{ pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy293, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209); } break; case 251: /* cmd ::= DESC full_table_name */ case 252: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==252); -{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy840); } +{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy272); } break; case 253: /* cmd ::= RESET QUERY CACHE */ { pCxt->pRootNode = createResetQueryCacheStmt(pCxt); } break; case 254: /* cmd ::= EXPLAIN analyze_opt explain_options query_expression */ -{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy313, yymsp[-1].minor.yy840, yymsp[0].minor.yy840); } +{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy293, yymsp[-1].minor.yy272, yymsp[0].minor.yy272); } break; case 256: /* analyze_opt ::= ANALYZE */ case 263: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==263); - case 426: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==426); -{ yymsp[0].minor.yy313 = true; } + case 424: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==424); +{ yymsp[0].minor.yy293 = true; } break; case 257: /* explain_options ::= */ -{ yymsp[1].minor.yy840 = createDefaultExplainOptions(pCxt); } +{ yymsp[1].minor.yy272 = createDefaultExplainOptions(pCxt); } break; case 258: /* explain_options ::= explain_options VERBOSE NK_BOOL */ -{ yylhsminor.yy840 = setExplainVerbose(pCxt, yymsp[-2].minor.yy840, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setExplainVerbose(pCxt, yymsp[-2].minor.yy272, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 259: /* explain_options ::= explain_options RATIO NK_FLOAT */ -{ yylhsminor.yy840 = setExplainRatio(pCxt, yymsp[-2].minor.yy840, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; +{ yylhsminor.yy272 = setExplainRatio(pCxt, yymsp[-2].minor.yy272, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; case 260: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */ -{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy313, yymsp[-8].minor.yy313, &yymsp[-5].minor.yy617, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy784, yymsp[0].minor.yy844); } +{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy293, yymsp[-8].minor.yy293, &yymsp[-5].minor.yy209, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy616, yymsp[0].minor.yy232); } break; case 261: /* cmd ::= DROP FUNCTION exists_opt function_name */ -{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy313, &yymsp[0].minor.yy617); } +{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy293, &yymsp[0].minor.yy209); } break; case 264: /* bufsize_opt ::= */ -{ yymsp[1].minor.yy844 = 0; } +{ yymsp[1].minor.yy232 = 0; } break; case 265: /* bufsize_opt ::= BUFSIZE NK_INTEGER */ -{ yymsp[-1].minor.yy844 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); } +{ yymsp[-1].minor.yy232 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 266: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */ -{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-5].minor.yy313, &yymsp[-4].minor.yy617, yymsp[-2].minor.yy840, yymsp[-3].minor.yy840, yymsp[0].minor.yy840); } + case 266: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name AS query_expression */ +{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-6].minor.yy293, &yymsp[-5].minor.yy209, yymsp[-2].minor.yy272, yymsp[-4].minor.yy272, yymsp[0].minor.yy272); } break; case 267: /* cmd ::= DROP STREAM exists_opt stream_name */ -{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy313, &yymsp[0].minor.yy617); } +{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy293, &yymsp[0].minor.yy209); } break; - case 269: /* into_opt ::= INTO full_table_name */ - case 407: /* from_clause_opt ::= FROM table_reference_list */ yytestcase(yyruleno==407); - case 436: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==436); - case 459: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==459); -{ yymsp[-1].minor.yy840 = yymsp[0].minor.yy840; } + case 269: /* stream_options ::= stream_options TRIGGER AT_ONCE */ +{ ((SStreamOptions*)yymsp[-2].minor.yy272)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy272 = yymsp[-2].minor.yy272; } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 271: /* stream_options ::= stream_options TRIGGER AT_ONCE */ -{ ((SStreamOptions*)yymsp[-2].minor.yy840)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy840 = yymsp[-2].minor.yy840; } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + case 270: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ +{ ((SStreamOptions*)yymsp[-2].minor.yy272)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy272 = yymsp[-2].minor.yy272; } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 272: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ -{ ((SStreamOptions*)yymsp[-2].minor.yy840)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy840 = yymsp[-2].minor.yy840; } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + case 271: /* stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */ +{ ((SStreamOptions*)yymsp[-3].minor.yy272)->triggerType = STREAM_TRIGGER_MAX_DELAY; ((SStreamOptions*)yymsp[-3].minor.yy272)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy272); yylhsminor.yy272 = yymsp[-3].minor.yy272; } + yymsp[-3].minor.yy272 = yylhsminor.yy272; break; - case 273: /* stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */ -{ ((SStreamOptions*)yymsp[-3].minor.yy840)->triggerType = STREAM_TRIGGER_MAX_DELAY; ((SStreamOptions*)yymsp[-3].minor.yy840)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy840); yylhsminor.yy840 = yymsp[-3].minor.yy840; } - yymsp[-3].minor.yy840 = yylhsminor.yy840; + case 273: /* stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */ +{ ((SStreamOptions*)yymsp[-3].minor.yy272)->ignoreExpired = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); yylhsminor.yy272 = yymsp[-3].minor.yy272; } + yymsp[-3].minor.yy272 = yylhsminor.yy272; break; - case 275: /* stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */ -{ ((SStreamOptions*)yymsp[-3].minor.yy840)->ignoreExpired = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); yylhsminor.yy840 = yymsp[-3].minor.yy840; } - yymsp[-3].minor.yy840 = yylhsminor.yy840; - break; - case 276: /* cmd ::= KILL CONNECTION NK_INTEGER */ + case 274: /* cmd ::= KILL CONNECTION NK_INTEGER */ { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &yymsp[0].minor.yy0); } break; - case 277: /* cmd ::= KILL QUERY NK_STRING */ + case 275: /* cmd ::= KILL QUERY NK_STRING */ { pCxt->pRootNode = createKillQueryStmt(pCxt, &yymsp[0].minor.yy0); } break; - case 278: /* cmd ::= KILL TRANSACTION NK_INTEGER */ + case 276: /* cmd ::= KILL TRANSACTION NK_INTEGER */ { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_TRANSACTION_STMT, &yymsp[0].minor.yy0); } break; - case 279: /* cmd ::= BALANCE VGROUP */ + case 277: /* cmd ::= BALANCE VGROUP */ { pCxt->pRootNode = createBalanceVgroupStmt(pCxt); } break; - case 280: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ + case 278: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ { pCxt->pRootNode = createMergeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 281: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ -{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy544); } + case 279: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ +{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy172); } break; - case 282: /* cmd ::= SPLIT VGROUP NK_INTEGER */ + case 280: /* cmd ::= SPLIT VGROUP NK_INTEGER */ { pCxt->pRootNode = createSplitVgroupStmt(pCxt, &yymsp[0].minor.yy0); } break; - case 283: /* dnode_list ::= DNODE NK_INTEGER */ -{ yymsp[-1].minor.yy544 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - break; - case 285: /* cmd ::= DELETE FROM full_table_name where_clause_opt */ -{ pCxt->pRootNode = createDeleteStmt(pCxt, yymsp[-1].minor.yy840, yymsp[0].minor.yy840); } - break; - case 287: /* cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression */ -{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-4].minor.yy840, yymsp[-2].minor.yy544, yymsp[0].minor.yy840); } - break; - case 288: /* cmd ::= INSERT INTO full_table_name query_expression */ -{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-1].minor.yy840, NULL, yymsp[0].minor.yy840); } - break; - case 289: /* literal ::= NK_INTEGER */ -{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy840 = yylhsminor.yy840; - break; - case 290: /* literal ::= NK_FLOAT */ -{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy840 = yylhsminor.yy840; - break; - case 291: /* literal ::= NK_STRING */ -{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy840 = yylhsminor.yy840; - break; - case 292: /* literal ::= NK_BOOL */ -{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy840 = yylhsminor.yy840; - break; - case 293: /* literal ::= TIMESTAMP NK_STRING */ -{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); } - yymsp[-1].minor.yy840 = yylhsminor.yy840; - break; - case 294: /* literal ::= duration_literal */ - case 304: /* signed_literal ::= signed */ yytestcase(yyruleno==304); - case 324: /* expression ::= literal */ yytestcase(yyruleno==324); - case 325: /* expression ::= pseudo_column */ yytestcase(yyruleno==325); - case 326: /* expression ::= column_reference */ yytestcase(yyruleno==326); - case 327: /* expression ::= function_expression */ yytestcase(yyruleno==327); - case 328: /* expression ::= subquery */ yytestcase(yyruleno==328); - case 356: /* function_expression ::= literal_func */ yytestcase(yyruleno==356); - case 398: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==398); - case 402: /* boolean_primary ::= predicate */ yytestcase(yyruleno==402); - case 404: /* common_expression ::= expression */ yytestcase(yyruleno==404); - case 405: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==405); - case 408: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==408); - case 410: /* table_reference ::= table_primary */ yytestcase(yyruleno==410); - case 411: /* table_reference ::= joined_table */ yytestcase(yyruleno==411); - case 415: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==415); - case 465: /* query_expression_body ::= query_primary */ yytestcase(yyruleno==465); - case 468: /* query_primary ::= query_specification */ yytestcase(yyruleno==468); -{ yylhsminor.yy840 = yymsp[0].minor.yy840; } - yymsp[0].minor.yy840 = yylhsminor.yy840; - break; - case 295: /* literal ::= NULL */ -{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy840 = yylhsminor.yy840; - break; - case 296: /* literal ::= NK_QUESTION */ -{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy840 = yylhsminor.yy840; - break; - case 297: /* duration_literal ::= NK_VARIABLE */ -{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy840 = yylhsminor.yy840; - break; - case 298: /* signed ::= NK_INTEGER */ -{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy840 = yylhsminor.yy840; - break; - case 299: /* signed ::= NK_PLUS NK_INTEGER */ -{ yymsp[-1].minor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); } - break; - case 300: /* signed ::= NK_MINUS NK_INTEGER */ + case 281: /* dnode_list ::= DNODE NK_INTEGER */ +{ yymsp[-1].minor.yy172 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } + break; + case 283: /* cmd ::= DELETE FROM full_table_name where_clause_opt */ +{ pCxt->pRootNode = createDeleteStmt(pCxt, yymsp[-1].minor.yy272, yymsp[0].minor.yy272); } + break; + case 285: /* cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression */ +{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-4].minor.yy272, yymsp[-2].minor.yy172, yymsp[0].minor.yy272); } + break; + case 286: /* cmd ::= INSERT INTO full_table_name query_expression */ +{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-1].minor.yy272, NULL, yymsp[0].minor.yy272); } + break; + case 287: /* literal ::= NK_INTEGER */ +{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy272 = yylhsminor.yy272; + break; + case 288: /* literal ::= NK_FLOAT */ +{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy272 = yylhsminor.yy272; + break; + case 289: /* literal ::= NK_STRING */ +{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy272 = yylhsminor.yy272; + break; + case 290: /* literal ::= NK_BOOL */ +{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy272 = yylhsminor.yy272; + break; + case 291: /* literal ::= TIMESTAMP NK_STRING */ +{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); } + yymsp[-1].minor.yy272 = yylhsminor.yy272; + break; + case 292: /* literal ::= duration_literal */ + case 302: /* signed_literal ::= signed */ yytestcase(yyruleno==302); + case 322: /* expression ::= literal */ yytestcase(yyruleno==322); + case 323: /* expression ::= pseudo_column */ yytestcase(yyruleno==323); + case 324: /* expression ::= column_reference */ yytestcase(yyruleno==324); + case 325: /* expression ::= function_expression */ yytestcase(yyruleno==325); + case 326: /* expression ::= subquery */ yytestcase(yyruleno==326); + case 354: /* function_expression ::= literal_func */ yytestcase(yyruleno==354); + case 396: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==396); + case 400: /* boolean_primary ::= predicate */ yytestcase(yyruleno==400); + case 402: /* common_expression ::= expression */ yytestcase(yyruleno==402); + case 403: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==403); + case 406: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==406); + case 408: /* table_reference ::= table_primary */ yytestcase(yyruleno==408); + case 409: /* table_reference ::= joined_table */ yytestcase(yyruleno==409); + case 413: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==413); + case 463: /* query_expression_body ::= query_primary */ yytestcase(yyruleno==463); + case 466: /* query_primary ::= query_specification */ yytestcase(yyruleno==466); +{ yylhsminor.yy272 = yymsp[0].minor.yy272; } + yymsp[0].minor.yy272 = yylhsminor.yy272; + break; + case 293: /* literal ::= NULL */ +{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy272 = yylhsminor.yy272; + break; + case 294: /* literal ::= NK_QUESTION */ +{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy272 = yylhsminor.yy272; + break; + case 295: /* duration_literal ::= NK_VARIABLE */ +{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy272 = yylhsminor.yy272; + break; + case 296: /* signed ::= NK_INTEGER */ +{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy272 = yylhsminor.yy272; + break; + case 297: /* signed ::= NK_PLUS NK_INTEGER */ +{ yymsp[-1].minor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); } + break; + case 298: /* signed ::= NK_MINUS NK_INTEGER */ { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t); + yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t); } - yymsp[-1].minor.yy840 = yylhsminor.yy840; + yymsp[-1].minor.yy272 = yylhsminor.yy272; break; - case 301: /* signed ::= NK_FLOAT */ -{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy840 = yylhsminor.yy840; + case 299: /* signed ::= NK_FLOAT */ +{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy272 = yylhsminor.yy272; break; - case 302: /* signed ::= NK_PLUS NK_FLOAT */ -{ yymsp[-1].minor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } + case 300: /* signed ::= NK_PLUS NK_FLOAT */ +{ yymsp[-1].minor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } break; - case 303: /* signed ::= NK_MINUS NK_FLOAT */ + case 301: /* signed ::= NK_MINUS NK_FLOAT */ { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t); + yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t); } - yymsp[-1].minor.yy840 = yylhsminor.yy840; + yymsp[-1].minor.yy272 = yylhsminor.yy272; break; - case 305: /* signed_literal ::= NK_STRING */ -{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy840 = yylhsminor.yy840; + case 303: /* signed_literal ::= NK_STRING */ +{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy272 = yylhsminor.yy272; break; - case 306: /* signed_literal ::= NK_BOOL */ -{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy840 = yylhsminor.yy840; + case 304: /* signed_literal ::= NK_BOOL */ +{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy272 = yylhsminor.yy272; break; - case 307: /* signed_literal ::= TIMESTAMP NK_STRING */ -{ yymsp[-1].minor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); } + case 305: /* signed_literal ::= TIMESTAMP NK_STRING */ +{ yymsp[-1].minor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); } break; - case 308: /* signed_literal ::= duration_literal */ - case 310: /* signed_literal ::= literal_func */ yytestcase(yyruleno==310); - case 376: /* star_func_para ::= expression */ yytestcase(yyruleno==376); - case 431: /* select_item ::= common_expression */ yytestcase(yyruleno==431); - case 481: /* search_condition ::= common_expression */ yytestcase(yyruleno==481); -{ yylhsminor.yy840 = releaseRawExprNode(pCxt, yymsp[0].minor.yy840); } - yymsp[0].minor.yy840 = yylhsminor.yy840; + case 306: /* signed_literal ::= duration_literal */ + case 308: /* signed_literal ::= literal_func */ yytestcase(yyruleno==308); + case 374: /* star_func_para ::= expression */ yytestcase(yyruleno==374); + case 429: /* select_item ::= common_expression */ yytestcase(yyruleno==429); + case 479: /* search_condition ::= common_expression */ yytestcase(yyruleno==479); +{ yylhsminor.yy272 = releaseRawExprNode(pCxt, yymsp[0].minor.yy272); } + yymsp[0].minor.yy272 = yylhsminor.yy272; break; - case 309: /* signed_literal ::= NULL */ -{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy840 = yylhsminor.yy840; + case 307: /* signed_literal ::= NULL */ +{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy272 = yylhsminor.yy272; break; - case 311: /* signed_literal ::= NK_QUESTION */ -{ yylhsminor.yy840 = createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy840 = yylhsminor.yy840; + case 309: /* signed_literal ::= NK_QUESTION */ +{ yylhsminor.yy272 = createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy272 = yylhsminor.yy272; break; - case 329: /* expression ::= NK_LP expression NK_RP */ - case 403: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==403); -{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy840)); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + case 327: /* expression ::= NK_LP expression NK_RP */ + case 401: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==401); +{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy272)); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 330: /* expression ::= NK_PLUS expression */ + case 328: /* expression ::= NK_PLUS expression */ { - SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy840)); + SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy272)); } - yymsp[-1].minor.yy840 = yylhsminor.yy840; + yymsp[-1].minor.yy272 = yylhsminor.yy272; break; - case 331: /* expression ::= NK_MINUS expression */ + case 329: /* expression ::= NK_MINUS expression */ { - SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy840), NULL)); + SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy272), NULL)); } - yymsp[-1].minor.yy840 = yylhsminor.yy840; + yymsp[-1].minor.yy272 = yylhsminor.yy272; break; - case 332: /* expression ::= expression NK_PLUS expression */ + case 330: /* expression ::= expression NK_PLUS expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 333: /* expression ::= expression NK_MINUS expression */ + case 331: /* expression ::= expression NK_MINUS expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 334: /* expression ::= expression NK_STAR expression */ + case 332: /* expression ::= expression NK_STAR expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 335: /* expression ::= expression NK_SLASH expression */ + case 333: /* expression ::= expression NK_SLASH expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 336: /* expression ::= expression NK_REM expression */ + case 334: /* expression ::= expression NK_REM expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_REM, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_REM, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 337: /* expression ::= column_reference NK_ARROW NK_STRING */ + case 335: /* expression ::= column_reference NK_ARROW NK_STRING */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0))); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 338: /* expression ::= expression NK_BITAND expression */ + case 336: /* expression ::= expression NK_BITAND expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 339: /* expression ::= expression NK_BITOR expression */ + case 337: /* expression ::= expression NK_BITOR expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; - break; - case 342: /* column_reference ::= column_name */ -{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy617, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy617)); } - yymsp[0].minor.yy840 = yylhsminor.yy840; - break; - case 343: /* column_reference ::= table_name NK_DOT column_name */ -{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617, createColumnNode(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617)); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; - break; - case 344: /* pseudo_column ::= ROWTS */ - case 345: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==345); - case 347: /* pseudo_column ::= QSTART */ yytestcase(yyruleno==347); - case 348: /* pseudo_column ::= QEND */ yytestcase(yyruleno==348); - case 349: /* pseudo_column ::= QDURATION */ yytestcase(yyruleno==349); - case 350: /* pseudo_column ::= WSTART */ yytestcase(yyruleno==350); - case 351: /* pseudo_column ::= WEND */ yytestcase(yyruleno==351); - case 352: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==352); - case 358: /* literal_func ::= NOW */ yytestcase(yyruleno==358); -{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); } - yymsp[0].minor.yy840 = yylhsminor.yy840; - break; - case 346: /* pseudo_column ::= table_name NK_DOT TBNAME */ -{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy617)))); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; - break; - case 353: /* function_expression ::= function_name NK_LP expression_list NK_RP */ - case 354: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==354); -{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy617, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy617, yymsp[-1].minor.yy544)); } - yymsp[-3].minor.yy840 = yylhsminor.yy840; - break; - case 355: /* function_expression ::= CAST NK_LP expression AS type_name NK_RP */ -{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), yymsp[-1].minor.yy784)); } - yymsp[-5].minor.yy840 = yylhsminor.yy840; - break; - case 357: /* literal_func ::= noarg_func NK_LP NK_RP */ -{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy617, NULL)); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; - break; - case 372: /* star_func_para_list ::= NK_STAR */ -{ yylhsminor.yy544 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy544 = yylhsminor.yy544; - break; - case 377: /* star_func_para ::= table_name NK_DOT NK_STAR */ - case 434: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==434); -{ yylhsminor.yy840 = createColumnNode(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; - break; - case 378: /* predicate ::= expression compare_op expression */ - case 383: /* predicate ::= expression in_op in_predicate_value */ yytestcase(yyruleno==383); + yymsp[-2].minor.yy272 = yylhsminor.yy272; + break; + case 340: /* column_reference ::= column_name */ +{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy209, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy209)); } + yymsp[0].minor.yy272 = yylhsminor.yy272; + break; + case 341: /* column_reference ::= table_name NK_DOT column_name */ +{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209, createColumnNode(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209)); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; + break; + case 342: /* pseudo_column ::= ROWTS */ + case 343: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==343); + case 345: /* pseudo_column ::= QSTART */ yytestcase(yyruleno==345); + case 346: /* pseudo_column ::= QEND */ yytestcase(yyruleno==346); + case 347: /* pseudo_column ::= QDURATION */ yytestcase(yyruleno==347); + case 348: /* pseudo_column ::= WSTART */ yytestcase(yyruleno==348); + case 349: /* pseudo_column ::= WEND */ yytestcase(yyruleno==349); + case 350: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==350); + case 356: /* literal_func ::= NOW */ yytestcase(yyruleno==356); +{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); } + yymsp[0].minor.yy272 = yylhsminor.yy272; + break; + case 344: /* pseudo_column ::= table_name NK_DOT TBNAME */ +{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy209)))); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; + break; + case 351: /* function_expression ::= function_name NK_LP expression_list NK_RP */ + case 352: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==352); +{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy209, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy209, yymsp[-1].minor.yy172)); } + yymsp[-3].minor.yy272 = yylhsminor.yy272; + break; + case 353: /* function_expression ::= CAST NK_LP expression AS type_name NK_RP */ +{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), yymsp[-1].minor.yy616)); } + yymsp[-5].minor.yy272 = yylhsminor.yy272; + break; + case 355: /* literal_func ::= noarg_func NK_LP NK_RP */ +{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy209, NULL)); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; + break; + case 370: /* star_func_para_list ::= NK_STAR */ +{ yylhsminor.yy172 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy172 = yylhsminor.yy172; + break; + case 375: /* star_func_para ::= table_name NK_DOT NK_STAR */ + case 432: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==432); +{ yylhsminor.yy272 = createColumnNode(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; + break; + case 376: /* predicate ::= expression compare_op expression */ + case 381: /* predicate ::= expression in_op in_predicate_value */ yytestcase(yyruleno==381); { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy198, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy392, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 379: /* predicate ::= expression BETWEEN expression AND expression */ + case 377: /* predicate ::= expression BETWEEN expression AND expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy840); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy840), releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy272); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy272), releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); } - yymsp[-4].minor.yy840 = yylhsminor.yy840; + yymsp[-4].minor.yy272 = yylhsminor.yy272; break; - case 380: /* predicate ::= expression NOT BETWEEN expression AND expression */ + case 378: /* predicate ::= expression NOT BETWEEN expression AND expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy840); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy840), releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy272); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy272), releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); } - yymsp[-5].minor.yy840 = yylhsminor.yy840; + yymsp[-5].minor.yy272 = yylhsminor.yy272; break; - case 381: /* predicate ::= expression IS NULL */ + case 379: /* predicate ::= expression IS NULL */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), NULL)); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), NULL)); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 382: /* predicate ::= expression IS NOT NULL */ + case 380: /* predicate ::= expression IS NOT NULL */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), NULL)); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), NULL)); } - yymsp[-3].minor.yy840 = yylhsminor.yy840; + yymsp[-3].minor.yy272 = yylhsminor.yy272; break; - case 384: /* compare_op ::= NK_LT */ -{ yymsp[0].minor.yy198 = OP_TYPE_LOWER_THAN; } + case 382: /* compare_op ::= NK_LT */ +{ yymsp[0].minor.yy392 = OP_TYPE_LOWER_THAN; } break; - case 385: /* compare_op ::= NK_GT */ -{ yymsp[0].minor.yy198 = OP_TYPE_GREATER_THAN; } + case 383: /* compare_op ::= NK_GT */ +{ yymsp[0].minor.yy392 = OP_TYPE_GREATER_THAN; } break; - case 386: /* compare_op ::= NK_LE */ -{ yymsp[0].minor.yy198 = OP_TYPE_LOWER_EQUAL; } + case 384: /* compare_op ::= NK_LE */ +{ yymsp[0].minor.yy392 = OP_TYPE_LOWER_EQUAL; } break; - case 387: /* compare_op ::= NK_GE */ -{ yymsp[0].minor.yy198 = OP_TYPE_GREATER_EQUAL; } + case 385: /* compare_op ::= NK_GE */ +{ yymsp[0].minor.yy392 = OP_TYPE_GREATER_EQUAL; } break; - case 388: /* compare_op ::= NK_NE */ -{ yymsp[0].minor.yy198 = OP_TYPE_NOT_EQUAL; } + case 386: /* compare_op ::= NK_NE */ +{ yymsp[0].minor.yy392 = OP_TYPE_NOT_EQUAL; } break; - case 389: /* compare_op ::= NK_EQ */ -{ yymsp[0].minor.yy198 = OP_TYPE_EQUAL; } + case 387: /* compare_op ::= NK_EQ */ +{ yymsp[0].minor.yy392 = OP_TYPE_EQUAL; } break; - case 390: /* compare_op ::= LIKE */ -{ yymsp[0].minor.yy198 = OP_TYPE_LIKE; } + case 388: /* compare_op ::= LIKE */ +{ yymsp[0].minor.yy392 = OP_TYPE_LIKE; } break; - case 391: /* compare_op ::= NOT LIKE */ -{ yymsp[-1].minor.yy198 = OP_TYPE_NOT_LIKE; } + case 389: /* compare_op ::= NOT LIKE */ +{ yymsp[-1].minor.yy392 = OP_TYPE_NOT_LIKE; } break; - case 392: /* compare_op ::= MATCH */ -{ yymsp[0].minor.yy198 = OP_TYPE_MATCH; } + case 390: /* compare_op ::= MATCH */ +{ yymsp[0].minor.yy392 = OP_TYPE_MATCH; } break; - case 393: /* compare_op ::= NMATCH */ -{ yymsp[0].minor.yy198 = OP_TYPE_NMATCH; } + case 391: /* compare_op ::= NMATCH */ +{ yymsp[0].minor.yy392 = OP_TYPE_NMATCH; } break; - case 394: /* compare_op ::= CONTAINS */ -{ yymsp[0].minor.yy198 = OP_TYPE_JSON_CONTAINS; } + case 392: /* compare_op ::= CONTAINS */ +{ yymsp[0].minor.yy392 = OP_TYPE_JSON_CONTAINS; } break; - case 395: /* in_op ::= IN */ -{ yymsp[0].minor.yy198 = OP_TYPE_IN; } + case 393: /* in_op ::= IN */ +{ yymsp[0].minor.yy392 = OP_TYPE_IN; } break; - case 396: /* in_op ::= NOT IN */ -{ yymsp[-1].minor.yy198 = OP_TYPE_NOT_IN; } + case 394: /* in_op ::= NOT IN */ +{ yymsp[-1].minor.yy392 = OP_TYPE_NOT_IN; } break; - case 397: /* in_predicate_value ::= NK_LP literal_list NK_RP */ -{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy544)); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + case 395: /* in_predicate_value ::= NK_LP literal_list NK_RP */ +{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy172)); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 399: /* boolean_value_expression ::= NOT boolean_primary */ + case 397: /* boolean_value_expression ::= NOT boolean_primary */ { - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy840), NULL)); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy272), NULL)); } - yymsp[-1].minor.yy840 = yylhsminor.yy840; + yymsp[-1].minor.yy272 = yylhsminor.yy272; break; - case 400: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ + case 398: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 401: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ + case 399: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840); - yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272); + yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + yymsp[-2].minor.yy272 = yylhsminor.yy272; + break; + case 405: /* from_clause_opt ::= FROM table_reference_list */ + case 434: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==434); + case 457: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==457); +{ yymsp[-1].minor.yy272 = yymsp[0].minor.yy272; } break; - case 409: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */ -{ yylhsminor.yy840 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy840, yymsp[0].minor.yy840, NULL); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + case 407: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */ +{ yylhsminor.yy272 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy272, yymsp[0].minor.yy272, NULL); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 412: /* table_primary ::= table_name alias_opt */ -{ yylhsminor.yy840 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy617, &yymsp[0].minor.yy617); } - yymsp[-1].minor.yy840 = yylhsminor.yy840; + case 410: /* table_primary ::= table_name alias_opt */ +{ yylhsminor.yy272 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy209, &yymsp[0].minor.yy209); } + yymsp[-1].minor.yy272 = yylhsminor.yy272; break; - case 413: /* table_primary ::= db_name NK_DOT table_name alias_opt */ -{ yylhsminor.yy840 = createRealTableNode(pCxt, &yymsp[-3].minor.yy617, &yymsp[-1].minor.yy617, &yymsp[0].minor.yy617); } - yymsp[-3].minor.yy840 = yylhsminor.yy840; + case 411: /* table_primary ::= db_name NK_DOT table_name alias_opt */ +{ yylhsminor.yy272 = createRealTableNode(pCxt, &yymsp[-3].minor.yy209, &yymsp[-1].minor.yy209, &yymsp[0].minor.yy209); } + yymsp[-3].minor.yy272 = yylhsminor.yy272; break; - case 414: /* table_primary ::= subquery alias_opt */ -{ yylhsminor.yy840 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy840), &yymsp[0].minor.yy617); } - yymsp[-1].minor.yy840 = yylhsminor.yy840; + case 412: /* table_primary ::= subquery alias_opt */ +{ yylhsminor.yy272 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy272), &yymsp[0].minor.yy209); } + yymsp[-1].minor.yy272 = yylhsminor.yy272; break; - case 416: /* alias_opt ::= */ -{ yymsp[1].minor.yy617 = nil_token; } + case 414: /* alias_opt ::= */ +{ yymsp[1].minor.yy209 = nil_token; } break; - case 417: /* alias_opt ::= table_alias */ -{ yylhsminor.yy617 = yymsp[0].minor.yy617; } - yymsp[0].minor.yy617 = yylhsminor.yy617; + case 415: /* alias_opt ::= table_alias */ +{ yylhsminor.yy209 = yymsp[0].minor.yy209; } + yymsp[0].minor.yy209 = yylhsminor.yy209; break; - case 418: /* alias_opt ::= AS table_alias */ -{ yymsp[-1].minor.yy617 = yymsp[0].minor.yy617; } + case 416: /* alias_opt ::= AS table_alias */ +{ yymsp[-1].minor.yy209 = yymsp[0].minor.yy209; } break; - case 419: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */ - case 420: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==420); -{ yymsp[-2].minor.yy840 = yymsp[-1].minor.yy840; } + case 417: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */ + case 418: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==418); +{ yymsp[-2].minor.yy272 = yymsp[-1].minor.yy272; } break; - case 421: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ -{ yylhsminor.yy840 = createJoinTableNode(pCxt, yymsp[-4].minor.yy708, yymsp[-5].minor.yy840, yymsp[-2].minor.yy840, yymsp[0].minor.yy840); } - yymsp[-5].minor.yy840 = yylhsminor.yy840; + case 419: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ +{ yylhsminor.yy272 = createJoinTableNode(pCxt, yymsp[-4].minor.yy156, yymsp[-5].minor.yy272, yymsp[-2].minor.yy272, yymsp[0].minor.yy272); } + yymsp[-5].minor.yy272 = yylhsminor.yy272; break; - case 422: /* join_type ::= */ -{ yymsp[1].minor.yy708 = JOIN_TYPE_INNER; } + case 420: /* join_type ::= */ +{ yymsp[1].minor.yy156 = JOIN_TYPE_INNER; } break; - case 423: /* join_type ::= INNER */ -{ yymsp[0].minor.yy708 = JOIN_TYPE_INNER; } + case 421: /* join_type ::= INNER */ +{ yymsp[0].minor.yy156 = JOIN_TYPE_INNER; } break; - case 424: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ + case 422: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ { - yymsp[-11].minor.yy840 = createSelectStmt(pCxt, yymsp[-10].minor.yy313, yymsp[-9].minor.yy544, yymsp[-8].minor.yy840); - yymsp[-11].minor.yy840 = addWhereClause(pCxt, yymsp[-11].minor.yy840, yymsp[-7].minor.yy840); - yymsp[-11].minor.yy840 = addPartitionByClause(pCxt, yymsp[-11].minor.yy840, yymsp[-6].minor.yy544); - yymsp[-11].minor.yy840 = addWindowClauseClause(pCxt, yymsp[-11].minor.yy840, yymsp[-2].minor.yy840); - yymsp[-11].minor.yy840 = addGroupByClause(pCxt, yymsp[-11].minor.yy840, yymsp[-1].minor.yy544); - yymsp[-11].minor.yy840 = addHavingClause(pCxt, yymsp[-11].minor.yy840, yymsp[0].minor.yy840); - yymsp[-11].minor.yy840 = addRangeClause(pCxt, yymsp[-11].minor.yy840, yymsp[-5].minor.yy840); - yymsp[-11].minor.yy840 = addEveryClause(pCxt, yymsp[-11].minor.yy840, yymsp[-4].minor.yy840); - yymsp[-11].minor.yy840 = addFillClause(pCxt, yymsp[-11].minor.yy840, yymsp[-3].minor.yy840); + yymsp[-11].minor.yy272 = createSelectStmt(pCxt, yymsp[-10].minor.yy293, yymsp[-9].minor.yy172, yymsp[-8].minor.yy272); + yymsp[-11].minor.yy272 = addWhereClause(pCxt, yymsp[-11].minor.yy272, yymsp[-7].minor.yy272); + yymsp[-11].minor.yy272 = addPartitionByClause(pCxt, yymsp[-11].minor.yy272, yymsp[-6].minor.yy172); + yymsp[-11].minor.yy272 = addWindowClauseClause(pCxt, yymsp[-11].minor.yy272, yymsp[-2].minor.yy272); + yymsp[-11].minor.yy272 = addGroupByClause(pCxt, yymsp[-11].minor.yy272, yymsp[-1].minor.yy172); + yymsp[-11].minor.yy272 = addHavingClause(pCxt, yymsp[-11].minor.yy272, yymsp[0].minor.yy272); + yymsp[-11].minor.yy272 = addRangeClause(pCxt, yymsp[-11].minor.yy272, yymsp[-5].minor.yy272); + yymsp[-11].minor.yy272 = addEveryClause(pCxt, yymsp[-11].minor.yy272, yymsp[-4].minor.yy272); + yymsp[-11].minor.yy272 = addFillClause(pCxt, yymsp[-11].minor.yy272, yymsp[-3].minor.yy272); } break; - case 427: /* set_quantifier_opt ::= ALL */ -{ yymsp[0].minor.yy313 = false; } + case 425: /* set_quantifier_opt ::= ALL */ +{ yymsp[0].minor.yy293 = false; } break; - case 430: /* select_item ::= NK_STAR */ -{ yylhsminor.yy840 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy840 = yylhsminor.yy840; + case 428: /* select_item ::= NK_STAR */ +{ yylhsminor.yy272 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy272 = yylhsminor.yy272; break; - case 432: /* select_item ::= common_expression column_alias */ -{ yylhsminor.yy840 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy840), &yymsp[0].minor.yy617); } - yymsp[-1].minor.yy840 = yylhsminor.yy840; + case 430: /* select_item ::= common_expression column_alias */ +{ yylhsminor.yy272 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy272), &yymsp[0].minor.yy209); } + yymsp[-1].minor.yy272 = yylhsminor.yy272; break; - case 433: /* select_item ::= common_expression AS column_alias */ -{ yylhsminor.yy840 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), &yymsp[0].minor.yy617); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + case 431: /* select_item ::= common_expression AS column_alias */ +{ yylhsminor.yy272 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), &yymsp[0].minor.yy209); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 438: /* partition_by_clause_opt ::= PARTITION BY expression_list */ - case 455: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==455); - case 471: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==471); -{ yymsp[-2].minor.yy544 = yymsp[0].minor.yy544; } + case 436: /* partition_by_clause_opt ::= PARTITION BY expression_list */ + case 453: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==453); + case 469: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==469); +{ yymsp[-2].minor.yy172 = yymsp[0].minor.yy172; } break; - case 440: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ -{ yymsp[-5].minor.yy840 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), releaseRawExprNode(pCxt, yymsp[-1].minor.yy840)); } + case 438: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ +{ yymsp[-5].minor.yy272 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), releaseRawExprNode(pCxt, yymsp[-1].minor.yy272)); } break; - case 441: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */ -{ yymsp[-3].minor.yy840 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy840)); } + case 439: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */ +{ yymsp[-3].minor.yy272 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy272)); } break; - case 442: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ -{ yymsp[-5].minor.yy840 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), NULL, yymsp[-1].minor.yy840, yymsp[0].minor.yy840); } + case 440: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ +{ yymsp[-5].minor.yy272 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), NULL, yymsp[-1].minor.yy272, yymsp[0].minor.yy272); } break; - case 443: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ -{ yymsp[-7].minor.yy840 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy840), releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), yymsp[-1].minor.yy840, yymsp[0].minor.yy840); } + case 441: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ +{ yymsp[-7].minor.yy272 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy272), releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), yymsp[-1].minor.yy272, yymsp[0].minor.yy272); } break; - case 445: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ - case 463: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==463); -{ yymsp[-3].minor.yy840 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy840); } + case 443: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ + case 461: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==461); +{ yymsp[-3].minor.yy272 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy272); } break; - case 447: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */ -{ yymsp[-3].minor.yy840 = createFillNode(pCxt, yymsp[-1].minor.yy816, NULL); } + case 445: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */ +{ yymsp[-3].minor.yy272 = createFillNode(pCxt, yymsp[-1].minor.yy186, NULL); } break; - case 448: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ -{ yymsp[-5].minor.yy840 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy544)); } + case 446: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ +{ yymsp[-5].minor.yy272 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy172)); } break; - case 449: /* fill_mode ::= NONE */ -{ yymsp[0].minor.yy816 = FILL_MODE_NONE; } + case 447: /* fill_mode ::= NONE */ +{ yymsp[0].minor.yy186 = FILL_MODE_NONE; } break; - case 450: /* fill_mode ::= PREV */ -{ yymsp[0].minor.yy816 = FILL_MODE_PREV; } + case 448: /* fill_mode ::= PREV */ +{ yymsp[0].minor.yy186 = FILL_MODE_PREV; } break; - case 451: /* fill_mode ::= NULL */ -{ yymsp[0].minor.yy816 = FILL_MODE_NULL; } + case 449: /* fill_mode ::= NULL */ +{ yymsp[0].minor.yy186 = FILL_MODE_NULL; } break; - case 452: /* fill_mode ::= LINEAR */ -{ yymsp[0].minor.yy816 = FILL_MODE_LINEAR; } + case 450: /* fill_mode ::= LINEAR */ +{ yymsp[0].minor.yy186 = FILL_MODE_LINEAR; } break; - case 453: /* fill_mode ::= NEXT */ -{ yymsp[0].minor.yy816 = FILL_MODE_NEXT; } + case 451: /* fill_mode ::= NEXT */ +{ yymsp[0].minor.yy186 = FILL_MODE_NEXT; } break; - case 456: /* group_by_list ::= expression */ -{ yylhsminor.yy544 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); } - yymsp[0].minor.yy544 = yylhsminor.yy544; + case 454: /* group_by_list ::= expression */ +{ yylhsminor.yy172 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); } + yymsp[0].minor.yy172 = yylhsminor.yy172; break; - case 457: /* group_by_list ::= group_by_list NK_COMMA expression */ -{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-2].minor.yy544, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); } - yymsp[-2].minor.yy544 = yylhsminor.yy544; + case 455: /* group_by_list ::= group_by_list NK_COMMA expression */ +{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-2].minor.yy172, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); } + yymsp[-2].minor.yy172 = yylhsminor.yy172; break; - case 461: /* range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP */ -{ yymsp[-5].minor.yy840 = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), releaseRawExprNode(pCxt, yymsp[-1].minor.yy840)); } + case 459: /* range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP */ +{ yymsp[-5].minor.yy272 = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), releaseRawExprNode(pCxt, yymsp[-1].minor.yy272)); } break; - case 464: /* query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */ + case 462: /* query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */ { - yylhsminor.yy840 = addOrderByClause(pCxt, yymsp[-3].minor.yy840, yymsp[-2].minor.yy544); - yylhsminor.yy840 = addSlimitClause(pCxt, yylhsminor.yy840, yymsp[-1].minor.yy840); - yylhsminor.yy840 = addLimitClause(pCxt, yylhsminor.yy840, yymsp[0].minor.yy840); + yylhsminor.yy272 = addOrderByClause(pCxt, yymsp[-3].minor.yy272, yymsp[-2].minor.yy172); + yylhsminor.yy272 = addSlimitClause(pCxt, yylhsminor.yy272, yymsp[-1].minor.yy272); + yylhsminor.yy272 = addLimitClause(pCxt, yylhsminor.yy272, yymsp[0].minor.yy272); } - yymsp[-3].minor.yy840 = yylhsminor.yy840; + yymsp[-3].minor.yy272 = yylhsminor.yy272; break; - case 466: /* query_expression_body ::= query_expression_body UNION ALL query_expression_body */ -{ yylhsminor.yy840 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy840, yymsp[0].minor.yy840); } - yymsp[-3].minor.yy840 = yylhsminor.yy840; + case 464: /* query_expression_body ::= query_expression_body UNION ALL query_expression_body */ +{ yylhsminor.yy272 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy272, yymsp[0].minor.yy272); } + yymsp[-3].minor.yy272 = yylhsminor.yy272; break; - case 467: /* query_expression_body ::= query_expression_body UNION query_expression_body */ -{ yylhsminor.yy840 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy840, yymsp[0].minor.yy840); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + case 465: /* query_expression_body ::= query_expression_body UNION query_expression_body */ +{ yylhsminor.yy272 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy272, yymsp[0].minor.yy272); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 469: /* query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */ + case 467: /* query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */ { - yymsp[-5].minor.yy840 = addOrderByClause(pCxt, yymsp[-4].minor.yy840, yymsp[-3].minor.yy544); - yymsp[-5].minor.yy840 = addSlimitClause(pCxt, yymsp[-5].minor.yy840, yymsp[-2].minor.yy840); - yymsp[-5].minor.yy840 = addLimitClause(pCxt, yymsp[-5].minor.yy840, yymsp[-1].minor.yy840); + yymsp[-5].minor.yy272 = addOrderByClause(pCxt, yymsp[-4].minor.yy272, yymsp[-3].minor.yy172); + yymsp[-5].minor.yy272 = addSlimitClause(pCxt, yymsp[-5].minor.yy272, yymsp[-2].minor.yy272); + yymsp[-5].minor.yy272 = addLimitClause(pCxt, yymsp[-5].minor.yy272, yymsp[-1].minor.yy272); } break; - case 473: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */ - case 477: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==477); -{ yymsp[-1].minor.yy840 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); } + case 471: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */ + case 475: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==475); +{ yymsp[-1].minor.yy272 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); } break; - case 474: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ - case 478: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==478); -{ yymsp[-3].minor.yy840 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } + case 472: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ + case 476: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==476); +{ yymsp[-3].minor.yy272 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } break; - case 475: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - case 479: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==479); -{ yymsp[-3].minor.yy840 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); } + case 473: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + case 477: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==477); +{ yymsp[-3].minor.yy272 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); } break; - case 480: /* subquery ::= NK_LP query_expression NK_RP */ -{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy840); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + case 478: /* subquery ::= NK_LP query_expression NK_RP */ +{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy272); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 484: /* sort_specification ::= expression ordering_specification_opt null_ordering_opt */ -{ yylhsminor.yy840 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), yymsp[-1].minor.yy204, yymsp[0].minor.yy277); } - yymsp[-2].minor.yy840 = yylhsminor.yy840; + case 482: /* sort_specification ::= expression ordering_specification_opt null_ordering_opt */ +{ yylhsminor.yy272 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), yymsp[-1].minor.yy818, yymsp[0].minor.yy493); } + yymsp[-2].minor.yy272 = yylhsminor.yy272; break; - case 485: /* ordering_specification_opt ::= */ -{ yymsp[1].minor.yy204 = ORDER_ASC; } + case 483: /* ordering_specification_opt ::= */ +{ yymsp[1].minor.yy818 = ORDER_ASC; } break; - case 486: /* ordering_specification_opt ::= ASC */ -{ yymsp[0].minor.yy204 = ORDER_ASC; } + case 484: /* ordering_specification_opt ::= ASC */ +{ yymsp[0].minor.yy818 = ORDER_ASC; } break; - case 487: /* ordering_specification_opt ::= DESC */ -{ yymsp[0].minor.yy204 = ORDER_DESC; } + case 485: /* ordering_specification_opt ::= DESC */ +{ yymsp[0].minor.yy818 = ORDER_DESC; } break; - case 488: /* null_ordering_opt ::= */ -{ yymsp[1].minor.yy277 = NULL_ORDER_DEFAULT; } + case 486: /* null_ordering_opt ::= */ +{ yymsp[1].minor.yy493 = NULL_ORDER_DEFAULT; } break; - case 489: /* null_ordering_opt ::= NULLS FIRST */ -{ yymsp[-1].minor.yy277 = NULL_ORDER_FIRST; } + case 487: /* null_ordering_opt ::= NULLS FIRST */ +{ yymsp[-1].minor.yy493 = NULL_ORDER_FIRST; } break; - case 490: /* null_ordering_opt ::= NULLS LAST */ -{ yymsp[-1].minor.yy277 = NULL_ORDER_LAST; } + case 488: /* null_ordering_opt ::= NULLS LAST */ +{ yymsp[-1].minor.yy493 = NULL_ORDER_LAST; } break; default: break; diff --git a/source/libs/parser/test/mockCatalog.cpp b/source/libs/parser/test/mockCatalog.cpp index b376c33d1aca8951ed31297cd12a1843ebf47462..cd7a9d549a1faab8994f71e7bf659c3a45f2cc01 100644 --- a/source/libs/parser/test/mockCatalog.cpp +++ b/source/libs/parser/test/mockCatalog.cpp @@ -137,7 +137,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) { } { ITableBuilder& builder = - mcs->createTableBuilder(TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS, TSDB_SYSTEM_TABLE, 1) + mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STREAMS, TSDB_SYSTEM_TABLE, 1) .addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN); builder.done(); } @@ -149,7 +149,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) { } { ITableBuilder& builder = - mcs->createTableBuilder(TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_SUBSCRIPTIONS, TSDB_SYSTEM_TABLE, 1) + mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SUBSCRIPTIONS, TSDB_SYSTEM_TABLE, 1) .addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN); builder.done(); } diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp index 9bca6cae0a41a145237b1035c5dd1edb4fdf0cd9..68c4ac3706e2a42fd370a96a85d1adf6df162774 100644 --- a/source/libs/parser/test/parInitialCTest.cpp +++ b/source/libs/parser/test/parInitialCTest.cpp @@ -568,15 +568,13 @@ TEST_F(ParserInitialCTest, createStream) { memset(&expect, 0, sizeof(SCMCreateStreamReq)); }; - auto setCreateStreamReqFunc = [&](const char* pStream, const char* pSrcDb, const char* pSql, - const char* pDstStb = nullptr, int8_t igExists = 0, - int8_t triggerType = STREAM_TRIGGER_AT_ONCE, int64_t maxDelay = 0, - int64_t watermark = 0, int8_t igExpired = STREAM_DEFAULT_IGNORE_EXPIRED) { + auto setCreateStreamReqFunc = [&](const char* pStream, const char* pSrcDb, const char* pSql, const char* pDstStb, + int8_t igExists = 0, int8_t triggerType = STREAM_TRIGGER_AT_ONCE, + int64_t maxDelay = 0, int64_t watermark = 0, + int8_t igExpired = STREAM_DEFAULT_IGNORE_EXPIRED) { snprintf(expect.name, sizeof(expect.name), "0.%s", pStream); snprintf(expect.sourceDB, sizeof(expect.sourceDB), "0.%s", pSrcDb); - if (NULL != pDstStb) { - snprintf(expect.targetStbFullName, sizeof(expect.targetStbFullName), "0.test.%s", pDstStb); - } + snprintf(expect.targetStbFullName, sizeof(expect.targetStbFullName), "0.test.%s", pDstStb); expect.igExists = igExists; expect.sql = strdup(pSql); expect.triggerType = triggerType; @@ -603,15 +601,6 @@ TEST_F(ParserInitialCTest, createStream) { tFreeSCMCreateStreamReq(&req); }); - setCreateStreamReqFunc("s1", "test", "create stream s1 as select count(*) from t1 interval(10s)"); - run("CREATE STREAM s1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)"); - clearCreateStreamReq(); - - setCreateStreamReqFunc("s1", "test", "create stream if not exists s1 as select count(*) from t1 interval(10s)", - nullptr, 1); - run("CREATE STREAM IF NOT EXISTS s1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)"); - clearCreateStreamReq(); - setCreateStreamReqFunc("s1", "test", "create stream s1 into st1 as select count(*) from t1 interval(10s)", "st1"); run("CREATE STREAM s1 INTO st1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)"); clearCreateStreamReq(); @@ -629,7 +618,8 @@ TEST_F(ParserInitialCTest, createStream) { TEST_F(ParserInitialCTest, createStreamSemanticCheck) { useDb("root", "test"); - run("CREATE STREAM s1 AS SELECT PERCENTILE(c1, 30) FROM t1 INTERVAL(10S)", TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC); + run("CREATE STREAM s1 INTO st1 AS SELECT PERCENTILE(c1, 30) FROM t1 INTERVAL(10S)", + TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC); } TEST_F(ParserInitialCTest, createTable) { diff --git a/source/libs/parser/test/parInsertTest.cpp b/source/libs/parser/test/parInsertTest.cpp index 7302491ba7b15daca8333c4b9870eb3615e0c015..ddf15ec67bf2b77edd2e1e622aad409a9ecc0e69 100644 --- a/source/libs/parser/test/parInsertTest.cpp +++ b/source/libs/parser/test/parInsertTest.cpp @@ -13,21 +13,13 @@ * along with this program. If not, see . */ -#include - #include -#include "mockCatalogService.h" -#include "os.h" -#include "parInt.h" +#include "parTestUtil.h" using namespace std; -using namespace std::placeholders; -using namespace testing; -namespace { -string toString(int32_t code) { return tstrerror(code); } -} // namespace +namespace ParserTest { // syntax: // INSERT INTO @@ -36,259 +28,60 @@ string toString(int32_t code) { return tstrerror(code); } // [(field1_name, ...)] // VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path // [...]; -class InsertTest : public Test { - protected: - InsertTest() : res_(nullptr) {} - ~InsertTest() { reset(); } - - void setDatabase(const string& acctId, const string& db) { - acctId_ = acctId; - db_ = db; - } - - void bind(const char* sql) { - reset(); - cxt_.acctId = atoi(acctId_.c_str()); - cxt_.db = (char*)db_.c_str(); - strcpy(sqlBuf_, sql); - cxt_.sqlLen = strlen(sql); - sqlBuf_[cxt_.sqlLen] = '\0'; - cxt_.pSql = sqlBuf_; - } - - int32_t run() { - code_ = parseInsertSql(&cxt_, &res_, nullptr); - if (code_ != TSDB_CODE_SUCCESS) { - cout << "code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; - } - return code_; - } - - int32_t runAsync() { - cxt_.async = true; - bool request = true; - unique_ptr > metaCache( - new SParseMetaCache(), std::bind(_destoryParseMetaCache, _1, cref(request))); - code_ = parseInsertSyntax(&cxt_, &res_, metaCache.get()); - if (code_ != TSDB_CODE_SUCCESS) { - cout << "parseInsertSyntax code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; - return code_; - } - - unique_ptr catalogReq(new SCatalogReq(), - MockCatalogService::destoryCatalogReq); - code_ = buildCatalogReq(metaCache.get(), catalogReq.get()); - if (code_ != TSDB_CODE_SUCCESS) { - cout << "buildCatalogReq code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; - return code_; - } - - unique_ptr metaData(new SMetaData(), MockCatalogService::destoryMetaData); - g_mockCatalogService->catalogGetAllMeta(catalogReq.get(), metaData.get()); - - metaCache.reset(new SParseMetaCache()); - request = false; - code_ = putMetaDataToCache(catalogReq.get(), metaData.get(), metaCache.get()); - if (code_ != TSDB_CODE_SUCCESS) { - cout << "putMetaDataToCache code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; - return code_; - } - - code_ = parseInsertSql(&cxt_, &res_, metaCache.get()); - if (code_ != TSDB_CODE_SUCCESS) { - cout << "parseInsertSql code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; - return code_; - } - - return code_; - } - - void dumpReslut() { - SVnodeModifOpStmt* pStmt = getVnodeModifStmt(res_); - size_t num = taosArrayGetSize(pStmt->pDataBlocks); - cout << "payloadType:" << (int32_t)pStmt->payloadType << ", insertType:" << pStmt->insertType - << ", numOfVgs:" << num << endl; - for (size_t i = 0; i < num; ++i) { - SVgDataBlocks* vg = (SVgDataBlocks*)taosArrayGetP(pStmt->pDataBlocks, i); - cout << "vgId:" << vg->vg.vgId << ", numOfTables:" << vg->numOfTables << ", dataSize:" << vg->size << endl; - SSubmitReq* submit = (SSubmitReq*)vg->pData; - cout << "length:" << ntohl(submit->length) << ", numOfBlocks:" << ntohl(submit->numOfBlocks) << endl; - int32_t numOfBlocks = ntohl(submit->numOfBlocks); - SSubmitBlk* blk = (SSubmitBlk*)(submit + 1); - for (int32_t i = 0; i < numOfBlocks; ++i) { - cout << "Block:" << i << endl; - cout << "\tuid:" << be64toh(blk->uid) << ", tid:" << be64toh(blk->suid) << ", sversion:" << ntohl(blk->sversion) - << ", dataLen:" << ntohl(blk->dataLen) << ", schemaLen:" << ntohl(blk->schemaLen) - << ", numOfRows:" << ntohl(blk->numOfRows) << endl; - blk = (SSubmitBlk*)(blk->data + ntohl(blk->dataLen)); - } - } - } - - void checkReslut(int32_t numOfTables, int32_t numOfRows1, int32_t numOfRows2 = -1) { - SVnodeModifOpStmt* pStmt = getVnodeModifStmt(res_); - ASSERT_EQ(pStmt->payloadType, PAYLOAD_TYPE_KV); - ASSERT_EQ(pStmt->insertType, TSDB_QUERY_TYPE_INSERT); - size_t num = taosArrayGetSize(pStmt->pDataBlocks); - ASSERT_GE(num, 0); - for (size_t i = 0; i < num; ++i) { - SVgDataBlocks* vg = (SVgDataBlocks*)taosArrayGetP(pStmt->pDataBlocks, i); - ASSERT_EQ(vg->numOfTables, numOfTables); - ASSERT_GE(vg->size, 0); - SSubmitReq* submit = (SSubmitReq*)vg->pData; - ASSERT_GE(ntohl(submit->length), 0); - ASSERT_GE(ntohl(submit->numOfBlocks), 0); - int32_t numOfBlocks = ntohl(submit->numOfBlocks); - SSubmitBlk* blk = (SSubmitBlk*)(submit + 1); - for (int32_t i = 0; i < numOfBlocks; ++i) { - ASSERT_EQ(ntohl(blk->numOfRows), (0 == i ? numOfRows1 : (numOfRows2 > 0 ? numOfRows2 : numOfRows1))); - blk = (SSubmitBlk*)(blk->data + ntohl(blk->dataLen)); - } - } - } - - private: - static const int max_err_len = 1024; - static const int max_sql_len = 1024 * 1024; - - static void _destoryParseMetaCache(SParseMetaCache* pMetaCache, bool request) { - destoryParseMetaCache(pMetaCache, request); - delete pMetaCache; - } - - void reset() { - memset(&cxt_, 0, sizeof(cxt_)); - memset(errMagBuf_, 0, max_err_len); - cxt_.pMsg = errMagBuf_; - cxt_.msgLen = max_err_len; - code_ = TSDB_CODE_SUCCESS; - qDestroyQuery(res_); - res_ = nullptr; - } - - SVnodeModifOpStmt* getVnodeModifStmt(SQuery* pQuery) { return (SVnodeModifOpStmt*)pQuery->pRoot; } - - string acctId_; - string db_; - char errMagBuf_[max_err_len]; - char sqlBuf_[max_sql_len]; - SParseContext cxt_; - int32_t code_; - SQuery* res_; -}; +class ParserInsertTest : public ParserTestBase {}; // INSERT INTO tb_name [(field1_name, ...)] VALUES (field1_value, ...) -TEST_F(InsertTest, singleTableSingleRowTest) { - setDatabase("root", "test"); - - bind("insert into t1 values (now, 1, 'beijing', 3, 4, 5)"); - ASSERT_EQ(run(), TSDB_CODE_SUCCESS); - dumpReslut(); - checkReslut(1, 1); +TEST_F(ParserInsertTest, singleTableSingleRowTest) { + useDb("root", "test"); - bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)"); - ASSERT_EQ(run(), TSDB_CODE_SUCCESS); + run("INSERT INTO t1 VALUES (now, 1, 'beijing', 3, 4, 5)"); - bind("insert into t1 values (now, 1, 'beijing', 3, 4, 5)"); - ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); - dumpReslut(); - checkReslut(1, 1); - - bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)"); - ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); + run("INSERT INTO t1 (ts, c1, c2, c3, c4, c5) VALUES (now, 1, 'beijing', 3, 4, 5)"); } // INSERT INTO tb_name VALUES (field1_value, ...)(field1_value, ...) -TEST_F(InsertTest, singleTableMultiRowTest) { - setDatabase("root", "test"); - - bind( - "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)" - "(now+2s, 3, 'guangzhou', 9, 10, 11)"); - ASSERT_EQ(run(), TSDB_CODE_SUCCESS); - dumpReslut(); - checkReslut(1, 3); +TEST_F(ParserInsertTest, singleTableMultiRowTest) { + useDb("root", "test"); - bind( - "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)" + run("INSERT INTO t1 VALUES (now, 1, 'beijing', 3, 4, 5)" + "(now+1s, 2, 'shanghai', 6, 7, 8)" "(now+2s, 3, 'guangzhou', 9, 10, 11)"); - ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); } // INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...) -TEST_F(InsertTest, multiTableSingleRowTest) { - setDatabase("root", "test"); +TEST_F(ParserInsertTest, multiTableSingleRowTest) { + useDb("root", "test"); - bind("insert into st1s1 values (now, 1, \"beijing\") st1s2 values (now, 10, \"131028\")"); - ASSERT_EQ(run(), TSDB_CODE_SUCCESS); - dumpReslut(); - checkReslut(2, 1); - - bind("insert into st1s1 values (now, 1, \"beijing\") st1s2 values (now, 10, \"131028\")"); - ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); + run("INSERT INTO st1s1 VALUES (now, 1, 'beijing') st1s2 VALUES (now, 10, '131028')"); } // INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...) -TEST_F(InsertTest, multiTableMultiRowTest) { - setDatabase("root", "test"); - - bind( - "insert into st1s1 values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")" - " st1s2 values (now, 10, \"131028\")(now+1s, 20, \"132028\")"); - ASSERT_EQ(run(), TSDB_CODE_SUCCESS); - dumpReslut(); - checkReslut(2, 3, 2); +TEST_F(ParserInsertTest, multiTableMultiRowTest) { + useDb("root", "test"); - bind( - "insert into st1s1 values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")" - " st1s2 values (now, 10, \"131028\")(now+1s, 20, \"132028\")"); - ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); + run("INSERT INTO " + "st1s1 VALUES (now, 1, 'beijing')(now+1s, 2, 'shanghai')(now+2s, 3, 'guangzhou') " + "st1s2 VALUES (now, 10, '131028')(now+1s, 20, '132028')"); } // INSERT INTO // tb1_name USING st1_name [(tag1_name, ...)] TAGS (tag1_value, ...) VALUES (field1_value, ...) // tb2_name USING st2_name [(tag1_name, ...)] TAGS (tag1_value, ...) VALUES (field1_value, ...) -TEST_F(InsertTest, autoCreateTableTest) { - setDatabase("root", "test"); - - bind( - "insert into st1s1 using st1 tags(1, 'wxy', now) " - "values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"); - ASSERT_EQ(run(), TSDB_CODE_SUCCESS); - dumpReslut(); - checkReslut(1, 3); +TEST_F(ParserInsertTest, autoCreateTableTest) { + useDb("root", "test"); - bind( - "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")" - "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"); - ASSERT_EQ(run(), TSDB_CODE_SUCCESS); + run("INSERT INTO st1s1 USING st1 TAGS(1, 'wxy', now) " + "VALUES (now, 1, 'beijing')(now+1s, 2, 'shanghai')(now+2s, 3, 'guangzhou')"); - bind( - "insert into st1s1 using st1 tags(1, 'wxy', now) " - "values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"); - ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); + run("INSERT INTO st1s1 USING st1 (tag1, tag2) TAGS(1, 'wxy') (ts, c1, c2) " + "VALUES (now, 1, 'beijing')(now+1s, 2, 'shanghai')(now+2s, 3, 'guangzhou')"); - bind( - "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")" - "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"); - ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); + run("INSERT INTO st1s1 (ts, c1, c2) USING st1 (tag1, tag2) TAGS(1, 'wxy') " + "VALUES (now, 1, 'beijing')(now+1s, 2, 'shanghai')(now+2s, 3, 'guangzhou')"); - bind( - "insert into st1s1 using st1 tags(1, 'wxy', now) values (now, 1, \"beijing\")" - "st1s1 using st1 tags(1, 'wxy', now) values (now+1s, 2, \"shanghai\")"); - ASSERT_EQ(run(), TSDB_CODE_SUCCESS); + run("INSERT INTO " + "st1s1 USING st1 (tag1, tag2) TAGS(1, 'wxy') (ts, c1, c2) VALUES (now, 1, 'beijing') " + "st1s2 (ts, c1, c2) USING st1 TAGS(2, 'abc', now) VALUES (now+1s, 2, 'shanghai')"); } -TEST_F(InsertTest, toleranceTest) { - setDatabase("root", "test"); - - bind("insert into"); - ASSERT_NE(run(), TSDB_CODE_SUCCESS); - bind("insert into t"); - ASSERT_NE(run(), TSDB_CODE_SUCCESS); - - bind("insert into"); - ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS); - bind("insert into t"); - ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS); -} +} // namespace ParserTest diff --git a/source/libs/parser/test/parTestUtil.cpp b/source/libs/parser/test/parTestUtil.cpp index 3fe4b533e44fe70e8e999ef3cacd15715cd632dd..360b904c170e50682b17d9c99a8ec1cd679a6db0 100644 --- a/source/libs/parser/test/parTestUtil.cpp +++ b/source/libs/parser/test/parTestUtil.cpp @@ -207,6 +207,7 @@ class ParserTestBaseImpl { pCxt->db = caseEnv_.db_.c_str(); pCxt->pUser = caseEnv_.user_.c_str(); pCxt->isSuperUser = caseEnv_.user_ == "root"; + pCxt->enableSysInfo = true; pCxt->pSql = stmtEnv_.sql_.c_str(); pCxt->sqlLen = stmtEnv_.sql_.length(); pCxt->pMsg = stmtEnv_.msgBuf_.data(); @@ -225,16 +226,17 @@ class ParserTestBaseImpl { DO_WITH_THROW(collectMetaKey, pCxt, pQuery, pMetaCache); } - void doBuildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) { - DO_WITH_THROW(buildCatalogReq, pMetaCache, pCatalogReq); + void doBuildCatalogReq(SParseContext* pCxt, const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) { + DO_WITH_THROW(buildCatalogReq, pCxt, pMetaCache, pCatalogReq); } void doGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) { DO_WITH_THROW(g_mockCatalogService->catalogGetAllMeta, pCatalogReq, pMetaData); } - void doPutMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) { - DO_WITH_THROW(putMetaDataToCache, pCatalogReq, pMetaData, pMetaCache); + void doPutMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache, + bool isInsertValues) { + DO_WITH_THROW(putMetaDataToCache, pCatalogReq, pMetaData, pMetaCache, isInsertValues); } void doAuthenticate(SParseContext* pCxt, SQuery* pQuery, SParseMetaCache* pMetaCache) { @@ -261,7 +263,9 @@ class ParserTestBaseImpl { void doParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatalogReq) { DO_WITH_THROW(qParseSqlSyntax, pCxt, pQuery, pCatalogReq); ASSERT_NE(*pQuery, nullptr); - res_.parsedAst_ = toString((*pQuery)->pRoot); + if (nullptr != (*pQuery)->pRoot) { + res_.parsedAst_ = toString((*pQuery)->pRoot); + } } void doAnalyseSqlSemantic(SParseContext* pCxt, const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, @@ -270,6 +274,17 @@ class ParserTestBaseImpl { res_.calcConstAst_ = toString(pQuery->pRoot); } + void doParseInsertSql(SParseContext* pCxt, SQuery** pQuery, SParseMetaCache* pMetaCache) { + DO_WITH_THROW(parseInsertSql, pCxt, pQuery, pMetaCache); + ASSERT_NE(*pQuery, nullptr); + res_.parsedAst_ = toString((*pQuery)->pRoot); + } + + void doParseInsertSyntax(SParseContext* pCxt, SQuery** pQuery, SParseMetaCache* pMetaCache) { + DO_WITH_THROW(parseInsertSyntax, pCxt, pQuery, pMetaCache); + ASSERT_NE(*pQuery, nullptr); + } + string toString(const SNode* pRoot) { char* pStr = NULL; int32_t len = 0; @@ -287,15 +302,20 @@ class ParserTestBaseImpl { SParseContext cxt = {0}; setParseContext(sql, &cxt); - unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery); - doParse(&cxt, query.get()); - SQuery* pQuery = *(query.get()); + if (qIsInsertValuesSql(cxt.pSql, cxt.sqlLen)) { + unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery); + doParseInsertSql(&cxt, query.get(), nullptr); + } else { + unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery); + doParse(&cxt, query.get()); + SQuery* pQuery = *(query.get()); - doAuthenticate(&cxt, pQuery, nullptr); + doAuthenticate(&cxt, pQuery, nullptr); - doTranslate(&cxt, pQuery, nullptr); + doTranslate(&cxt, pQuery, nullptr); - doCalculateConstant(&cxt, pQuery); + doCalculateConstant(&cxt, pQuery); + } if (g_dump) { dump(); @@ -338,17 +358,22 @@ class ParserTestBaseImpl { setParseContext(sql, &cxt, true); unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery); - doParse(&cxt, query.get()); - SQuery* pQuery = *(query.get()); - - bool request = true; + bool request = true; unique_ptr > metaCache( new SParseMetaCache(), bind(_destoryParseMetaCache, _1, cref(request))); - doCollectMetaKey(&cxt, pQuery, metaCache.get()); + bool isInsertValues = qIsInsertValuesSql(cxt.pSql, cxt.sqlLen); + if (isInsertValues) { + doParseInsertSyntax(&cxt, query.get(), metaCache.get()); + } else { + doParse(&cxt, query.get()); + doCollectMetaKey(&cxt, *(query.get()), metaCache.get()); + } + + SQuery* pQuery = *(query.get()); unique_ptr catalogReq(new SCatalogReq(), MockCatalogService::destoryCatalogReq); - doBuildCatalogReq(metaCache.get(), catalogReq.get()); + doBuildCatalogReq(&cxt, metaCache.get(), catalogReq.get()); string err; thread t1([&]() { @@ -358,13 +383,17 @@ class ParserTestBaseImpl { metaCache.reset(new SParseMetaCache()); request = false; - doPutMetaDataToCache(catalogReq.get(), metaData.get(), metaCache.get()); + doPutMetaDataToCache(catalogReq.get(), metaData.get(), metaCache.get(), isInsertValues); - doAuthenticate(&cxt, pQuery, metaCache.get()); + if (isInsertValues) { + doParseInsertSql(&cxt, query.get(), metaCache.get()); + } else { + doAuthenticate(&cxt, pQuery, metaCache.get()); - doTranslate(&cxt, pQuery, metaCache.get()); + doTranslate(&cxt, pQuery, metaCache.get()); - doCalculateConstant(&cxt, pQuery); + doCalculateConstant(&cxt, pQuery); + } } catch (const TerminateFlag& e) { // success and terminate } catch (const runtime_error& e) { diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index c843dd0a67f64b6647d987c848a6507c69fa814a..bf72f5210577d6f43f8ae97d098091b3020aeb16 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -44,12 +44,15 @@ static void setColumnInfo(SFunctionNode* pFunc, SColumnNode* pCol) { pCol->colType = COLUMN_TYPE_TBNAME; break; case FUNCTION_TYPE_WSTART: + pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID; + pCol->colType = COLUMN_TYPE_WINDOW_START; + break; case FUNCTION_TYPE_WEND: pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID; - pCol->colType = COLUMN_TYPE_WINDOW_PC; + pCol->colType = COLUMN_TYPE_WINDOW_END; break; case FUNCTION_TYPE_WDURATION: - pCol->colType = COLUMN_TYPE_WINDOW_PC; + pCol->colType = COLUMN_TYPE_WINDOW_DURATION; break; case FUNCTION_TYPE_GROUP_KEY: pCol->colType = COLUMN_TYPE_GROUP_KEY; @@ -194,28 +197,21 @@ static EScanType getScanType(SLogicPlanContext* pCxt, SNodeList* pScanPseudoCols return SCAN_TYPE_TABLE; } -static SNode* createPrimaryKeyCol(uint64_t tableId) { +static SNode* createFirstCol(uint64_t tableId, const SSchema* pSchema) { SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); if (NULL == pCol) { return NULL; } - pCol->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP; - pCol->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes; + pCol->node.resType.type = pSchema->type; + pCol->node.resType.bytes = pSchema->bytes; pCol->tableId = tableId; - pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID; + pCol->colId = pSchema->colId; pCol->colType = COLUMN_TYPE_COLUMN; - strcpy(pCol->colName, "#primarykey"); + strcpy(pCol->colName, pSchema->name); return (SNode*)pCol; } -static int32_t addPrimaryKeyCol(uint64_t tableId, SNodeList** pCols) { - if (NULL == *pCols) { - *pCols = nodesMakeList(); - if (NULL == *pCols) { - return TSDB_CODE_OUT_OF_MEMORY; - } - } - +static int32_t addPrimaryKeyCol(uint64_t tableId, const SSchema* pSchema, SNodeList** pCols) { bool found = false; SNode* pCol = NULL; FOREACH(pCol, *pCols) { @@ -226,13 +222,25 @@ static int32_t addPrimaryKeyCol(uint64_t tableId, SNodeList** pCols) { } if (!found) { - if (TSDB_CODE_SUCCESS != nodesListStrictAppend(*pCols, createPrimaryKeyCol(tableId))) { - return TSDB_CODE_OUT_OF_MEMORY; - } + return nodesListMakeStrictAppend(pCols, createFirstCol(tableId, pSchema)); } return TSDB_CODE_SUCCESS; } +static int32_t addSystableFirstCol(uint64_t tableId, const SSchema* pSchema, SNodeList** pCols) { + if (LIST_LENGTH(*pCols) > 0) { + return TSDB_CODE_SUCCESS; + } + return nodesListMakeStrictAppend(pCols, createFirstCol(tableId, pSchema)); +} + +static int32_t addDefaultScanCol(const STableMeta* pMeta, SNodeList** pCols) { + if (TSDB_SYSTEM_TABLE == pMeta->tableType) { + return addSystableFirstCol(pMeta->uid, pMeta->schema, pCols); + } + return addPrimaryKeyCol(pMeta->uid, pMeta->schema, pCols); +} + static int32_t makeScanLogicNode(SLogicPlanContext* pCxt, SRealTableNode* pRealTable, bool hasRepeatScanFuncs, SLogicNode** pLogicNode) { SScanLogicNode* pScan = (SScanLogicNode*)nodesMakeNode(QUERY_NODE_LOGIC_PLAN_SCAN); @@ -296,8 +304,8 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect pScan->hasNormalCols = true; } - if (TSDB_CODE_SUCCESS == code && SCAN_TYPE_SYSTEM_TABLE != pScan->scanType) { - code = addPrimaryKeyCol(pScan->tableId, &pScan->pScanCols); + if (TSDB_CODE_SUCCESS == code) { + code = addDefaultScanCol(pRealTable->pMeta, &pScan->pScanCols); } // set output @@ -784,7 +792,8 @@ static int32_t createWindowLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSele static EDealRes needFillValueImpl(SNode* pNode, void* pContext) { if (QUERY_NODE_COLUMN == nodeType(pNode)) { SColumnNode* pCol = (SColumnNode*)pNode; - if (COLUMN_TYPE_WINDOW_PC != pCol->colType && COLUMN_TYPE_GROUP_KEY != pCol->colType) { + if (COLUMN_TYPE_WINDOW_START != pCol->colType && COLUMN_TYPE_WINDOW_END != pCol->colType && + COLUMN_TYPE_WINDOW_DURATION != pCol->colType && COLUMN_TYPE_GROUP_KEY != pCol->colType) { *(bool*)pContext = true; return DEAL_RES_END; } @@ -1002,7 +1011,8 @@ static int32_t createPartitionLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pS int32_t code = nodesCollectColumns(pSelect, SQL_CLAUSE_PARTITION_BY, NULL, COLLECT_COL_TYPE_ALL, &pPartition->node.pTargets); if (TSDB_CODE_SUCCESS == code && NULL == pPartition->node.pTargets) { - code = nodesListMakeStrictAppend(&pPartition->node.pTargets, nodesListGetNode(pCxt->pCurrRoot->pTargets, 0)); + code = nodesListMakeStrictAppend(&pPartition->node.pTargets, + nodesCloneNode(nodesListGetNode(pCxt->pCurrRoot->pTargets, 0))); } if (TSDB_CODE_SUCCESS == code) { diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 45ab3903a9e9eb6df844244b6fc7cd8d009ebd47..c2f1d71b184e390bb48ff1a953a719bf49e427e9 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -1084,7 +1084,7 @@ static int32_t sortPriKeyOptGetSequencingNodesImpl(SLogicNode* pNode, bool* pNot switch (nodeType(pNode)) { case QUERY_NODE_LOGIC_PLAN_SCAN: { SScanLogicNode* pScan = (SScanLogicNode*)pNode; - if (NULL != pScan->pGroupTags) { + if (NULL != pScan->pGroupTags || TSDB_SYSTEM_TABLE == pScan->tableType) { *pNotOptimize = true; return TSDB_CODE_SUCCESS; } @@ -1665,7 +1665,10 @@ static bool eliminateProjOptMayBeOptimized(SLogicNode* pNode) { return false; } - if (QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(pNode) || 1 != LIST_LENGTH(pNode->pChildren)) { + // Super table scan requires project operator to merge packets to improve performance. + if (QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(pNode) || 1 != LIST_LENGTH(pNode->pChildren) || + (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(nodesListGetNode(pNode->pChildren, 0)) && + TSDB_SUPER_TABLE == ((SScanLogicNode*)nodesListGetNode(pNode->pChildren, 0))->tableType)) { return false; } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index c7eb6f7b5e61fdd1d4c29cc88a8b980bc1efdf79..cafae18dbe812546f6ef931d804ca4a9e5c1a6fa 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -576,6 +576,7 @@ static int32_t createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pScan->showRewrite = pScanLogicNode->showRewrite; pScan->accountId = pCxt->pPlanCxt->acctId; + pScan->sysInfo = pCxt->pPlanCxt->sysInfo; if (0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_TABLES) || 0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_TABLE_DISTRIBUTED) || 0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_TAGS)) { diff --git a/source/libs/planner/test/planOtherTest.cpp b/source/libs/planner/test/planOtherTest.cpp index 7107f8b3c94c616ae9db90132a59f2804b542aca..350ccd0d927c9773059cfb2c027a0ca2292e4d13 100644 --- a/source/libs/planner/test/planOtherTest.cpp +++ b/source/libs/planner/test/planOtherTest.cpp @@ -37,9 +37,9 @@ TEST_F(PlanOtherTest, createStream) { TEST_F(PlanOtherTest, createStreamUseSTable) { useDb("root", "test"); - run("CREATE STREAM IF NOT EXISTS s1 as SELECT COUNT(*) FROM st1 INTERVAL(10s)"); + run("CREATE STREAM IF NOT EXISTS s1 into st1 as SELECT COUNT(*) FROM st1 INTERVAL(10s)"); - run("CREATE STREAM IF NOT EXISTS s1 as SELECT COUNT(*) FROM st1 PARTITION BY TBNAME INTERVAL(10s)"); + run("CREATE STREAM IF NOT EXISTS s1 into st1 as SELECT COUNT(*) FROM st1 PARTITION BY TBNAME INTERVAL(10s)"); } TEST_F(PlanOtherTest, createSmaIndex) { diff --git a/source/libs/planner/test/planSysTbTest.cpp b/source/libs/planner/test/planSysTbTest.cpp index 921f86f09a41d36448ab0d435ab6a439645b9bfc..6b40e381cc18cb75cc9271352cd654d31a74242b 100644 --- a/source/libs/planner/test/planSysTbTest.cpp +++ b/source/libs/planner/test/planSysTbTest.cpp @@ -32,3 +32,9 @@ TEST_F(PlanSysTableTest, informationSchema) { run("SELECT * FROM information_schema.ins_databases WHERE name = 'information_schema'"); } + +TEST_F(PlanSysTableTest, withAgg) { + useDb("root", "information_schema"); + + run("SELECT COUNT(1) FROM ins_users"); +} diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp index 5fc8b3cf302612c9b8528a8380475b32a79a8824..96f7d29230bafc94639be35fcc56550c029ffbac 100644 --- a/source/libs/planner/test/planTestUtil.cpp +++ b/source/libs/planner/test/planTestUtil.cpp @@ -278,12 +278,12 @@ class PlannerTestBaseImpl { } void dump(DumpModule module) { + cout << "========================================== " << sqlNo_ << " sql : [" << stmtEnv_.sql_ << "]" << endl; + if (DUMP_MODULE_NOTHING == module) { return; } - cout << "========================================== " << sqlNo_ << " sql : [" << stmtEnv_.sql_ << "]" << endl; - if (DUMP_MODULE_ALL == module || DUMP_MODULE_PARSER == module) { if (res_.prepareAst_.empty()) { cout << "+++++++++++++++++++++syntax tree : " << endl; @@ -343,6 +343,7 @@ class PlannerTestBaseImpl { cxt.pMsg = stmtEnv_.msgBuf_.data(); cxt.msgLen = stmtEnv_.msgBuf_.max_size(); cxt.svrVer = "3.0.0.0"; + cxt.enableSysInfo = true; if (prepare) { SStmtCallback stmtCb = {0}; cxt.pStmtCb = &stmtCb; diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c index 5143aa4af1f90ba0e7a0ac2f37af6648ed68c685..d848016e46482614972d5e85469e4297136d6cc0 100644 --- a/source/libs/qcom/src/queryUtil.c +++ b/source/libs/qcom/src/queryUtil.c @@ -213,15 +213,25 @@ SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* nam return s; } +void freeSTableMetaRspPointer(void *p) { + tFreeSTableMetaRsp(*(void**)p); + taosMemoryFreeClear(*(void**)p); +} + void destroyQueryExecRes(SExecResult* pRes) { if (NULL == pRes || NULL == pRes->res) { return; } switch (pRes->msgType) { + case TDMT_VND_CREATE_TABLE: { + taosArrayDestroyEx((SArray*)pRes->res, freeSTableMetaRspPointer); + break; + } + case TDMT_MND_CREATE_STB: case TDMT_VND_ALTER_TABLE: case TDMT_MND_ALTER_STB: { - tFreeSTableMetaRsp((STableMetaRsp*)pRes->res); + tFreeSTableMetaRsp(pRes->res); taosMemoryFreeClear(pRes->res); break; } diff --git a/source/libs/qcom/src/querymsg.c b/source/libs/qcom/src/querymsg.c index ed8786170d0e37f677d1b731d08eafb511875023..e2d3ac1583926da6fe9d9aff82392c4fcc3c2d65 100644 --- a/source/libs/qcom/src/querymsg.c +++ b/source/libs/qcom/src/querymsg.c @@ -354,6 +354,19 @@ static int32_t queryConvertTableMetaMsg(STableMetaRsp *pMetaMsg) { return TSDB_CODE_SUCCESS; } +int32_t queryCreateCTableMetaFromMsg(STableMetaRsp *msg, SCTableMeta *pMeta) { + pMeta->vgId = msg->vgId; + pMeta->tableType = msg->tableType; + pMeta->uid = msg->tuid; + pMeta->suid = msg->suid; + + qDebug("ctable %s uid %" PRIx64 " meta returned, type %d vgId:%d db %s suid %" PRIx64 , + msg->tbName, pMeta->uid, pMeta->tableType, pMeta->vgId, msg->dbFName, pMeta->suid); + + return TSDB_CODE_SUCCESS; +} + + int32_t queryCreateTableMetaFromMsg(STableMetaRsp *msg, bool isStb, STableMeta **pMeta) { int32_t total = msg->numOfColumns + msg->numOfTags; int32_t metaSize = sizeof(STableMeta) + sizeof(SSchema) * total; diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index e06b752862832fdda79b8569d7b453a25bb90145..f006096ce20a45e18a5b9d990c9c63b621638ac5 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -1,3 +1,5 @@ +#include "qworker.h" + #include "dataSinkMgt.h" #include "executor.h" #include "planner.h" @@ -7,7 +9,6 @@ #include "tcommon.h" #include "tmsg.h" #include "tname.h" -#include "qworker.h" SQWorkerMgmt gQwMgmt = { .lock = 0, @@ -15,7 +16,6 @@ SQWorkerMgmt gQwMgmt = { .qwNum = 0, }; - int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *req) { int32_t code = 0; SSchedulerHbRsp rsp = {0}; @@ -26,7 +26,7 @@ int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *re QW_LOCK(QW_WRITE, &sch->hbConnLock); sch->hbBrokenTs = taosGetTimestampMs(); - + if (qwMsg->connInfo.handle == sch->hbConnInfo.handle) { tmsgReleaseHandle(&sch->hbConnInfo, TAOS_CONN_SERVER); sch->hbConnInfo.handle = NULL; @@ -44,8 +44,8 @@ int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *re QW_RET(TSDB_CODE_SUCCESS); } -static void freeItem(void* param) { - SExplainExecInfo* pInfo = param; +static void freeItem(void *param) { + SExplainExecInfo *pInfo = param; taosMemoryFree(pInfo->verboseInfo); } @@ -54,7 +54,7 @@ int32_t qwHandleTaskComplete(QW_FPARAMS_DEF, SQWTaskCtx *ctx) { if (TASK_TYPE_TEMP == ctx->taskType && taskHandle) { if (ctx->explain) { - SArray* execInfoList = taosArrayInit(4, sizeof(SExplainExecInfo)); + SArray *execInfoList = taosArrayInit(4, sizeof(SExplainExecInfo)); QW_ERR_RET(qGetExplainExecInfo(taskHandle, execInfoList)); SRpcHandleInfo connInfo = ctx->ctrlConnInfo; @@ -81,7 +81,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) { qTaskInfo_t taskHandle = ctx->taskHandle; DataSinkHandle sinkHandle = ctx->sinkHandle; - SArray* pResList = taosArrayInit(4, POINTER_BYTES); + SArray *pResList = taosArrayInit(4, POINTER_BYTES); while (true) { QW_TASK_DLOG("start to execTask, loopIdx:%d", i++); @@ -95,7 +95,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) { } else { QW_TASK_DLOG("qExecTask failed, code:%x - %s", code, tstrerror(code)); } - QW_ERR_RET(code); + QW_ERR_JRET(code); } } @@ -105,7 +105,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) { QW_TASK_DLOG("qExecTask end with empty res, useconds:%" PRIu64, useconds); dsEndPut(sinkHandle, useconds); - QW_ERR_RET(qwHandleTaskComplete(QW_FPARAMS(), ctx)); + QW_ERR_JRET(qwHandleTaskComplete(QW_FPARAMS(), ctx)); if (queryStop) { *queryStop = true; @@ -114,7 +114,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) { break; } - for(int32_t j = 0; j < taosArrayGetSize(pResList); ++j) { + for (int32_t j = 0; j < taosArrayGetSize(pResList); ++j) { SSDataBlock *pRes = taosArrayGetP(pResList, j); ASSERT(pRes->info.rows > 0); @@ -122,7 +122,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) { code = dsPutDataBlock(sinkHandle, &inputData, &qcontinue); if (code) { QW_TASK_ELOG("dsPutDataBlock failed, code:%x - %s", code, tstrerror(code)); - QW_ERR_RET(code); + QW_ERR_JRET(code); } QW_TASK_DLOG("data put into sink, rows:%d, continueExecTask:%d", pRes->info.rows, qcontinue); @@ -132,7 +132,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) { if (queryStop) { *queryStop = true; } - + break; } @@ -149,6 +149,8 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) { } } +_return: + taosArrayDestroy(pResList); QW_RET(code); } @@ -222,7 +224,8 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, QW_ERR_RET(code); } - QW_TASK_DLOG("no more data in sink and query end, fetched blocks %d rows %d", pOutput->numOfBlocks, pOutput->numOfRows); + QW_TASK_DLOG("no more data in sink and query end, fetched blocks %d rows %d", pOutput->numOfBlocks, + pOutput->numOfRows); qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_SUCC); if (NULL == rsp) { @@ -266,7 +269,8 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, pOutput->numOfBlocks++; if (DS_BUF_EMPTY == pOutput->bufStatus && pOutput->queryEnd) { - QW_TASK_DLOG("task all data fetched and done, fetched blocks %d rows %d", pOutput->numOfBlocks, pOutput->numOfRows); + QW_TASK_DLOG("task all data fetched and done, fetched blocks %d rows %d", pOutput->numOfBlocks, + pOutput->numOfRows); qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_SUCC); break; } @@ -288,10 +292,10 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, } int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SDeleteRes *pRes) { - int64_t len = 0; - bool queryEnd = false; - int32_t code = 0; - SOutputData output = {0}; + int64_t len = 0; + bool queryEnd = false; + int32_t code = 0; + SOutputData output = {0}; dsGetDataLength(ctx->sinkHandle, &len, &queryEnd); @@ -304,7 +308,7 @@ int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SDeleteRes *pRes if (NULL == output.pData) { QW_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } - + code = dsGetDataBlock(ctx->sinkHandle, &output); if (code) { QW_TASK_ELOG("dsGetDataBlock failed, code:%x - %s", code, tstrerror(code)); @@ -312,8 +316,8 @@ int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SDeleteRes *pRes QW_ERR_RET(code); } - SDeleterRes* pDelRes = (SDeleterRes*)output.pData; - + SDeleterRes *pDelRes = (SDeleterRes *)output.pData; + pRes->suid = pDelRes->suid; pRes->uidList = pDelRes->uidList; pRes->skey = pDelRes->skey; @@ -322,14 +326,13 @@ int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SDeleteRes *pRes strcpy(pRes->tableFName, pDelRes->tableName); strcpy(pRes->tsColName, pDelRes->tsColName); taosMemoryFree(output.pData); - + return TSDB_CODE_SUCCESS; } - int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *input, SQWPhaseOutput *output) { - int32_t code = 0; - SQWTaskCtx *ctx = NULL; + int32_t code = 0; + SQWTaskCtx *ctx = NULL; QW_TASK_DLOG("start to handle event at phase %s", qwPhaseStr(phase)); @@ -355,8 +358,8 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu if (QW_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) { QW_ERR_JRET(qwDropTask(QW_FPARAMS())); - //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); - //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); + // qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); + // QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED); break; @@ -391,8 +394,8 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu if (QW_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) { QW_ERR_JRET(qwDropTask(QW_FPARAMS())); - //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); - //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); + // qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); + // QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED); } @@ -428,9 +431,9 @@ _return: } int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *input, SQWPhaseOutput *output) { - int32_t code = 0; - SQWTaskCtx *ctx = NULL; - SRpcHandleInfo connInfo = {0}; + int32_t code = 0; + SQWTaskCtx *ctx = NULL; + SRpcHandleInfo connInfo = {0}; QW_TASK_DLOG("start to handle event at phase %s", qwPhaseStr(phase)); @@ -449,8 +452,8 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); } - //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); - //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); + // qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); + // QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); QW_ERR_JRET(qwDropTask(QW_FPARAMS())); QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED); @@ -473,14 +476,14 @@ _return: if (QW_PHASE_POST_QUERY == phase && ctx) { ctx->queryRsped = true; - bool rsped = false; + bool rsped = false; SQWMsg qwMsg = {.msgType = ctx->msgType, .connInfo = ctx->ctrlConnInfo}; qwDbgSimulateRedirect(&qwMsg, ctx, &rsped); qwDbgSimulateDead(QW_FPARAMS(), ctx, &rsped); if (!rsped) { qwBuildAndSendQueryRsp(input->msgType + 1, &ctx->ctrlConnInfo, code, ctx); QW_TASK_DLOG("query msg rsped, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); - } + } } if (ctx) { @@ -507,7 +510,6 @@ int32_t qwAbortPrerocessQuery(QW_FPARAMS_DEF) { QW_RET(TSDB_CODE_SUCCESS); } - int32_t qwPreprocessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) { int32_t code = 0; bool queryRsped = false; @@ -537,8 +539,7 @@ _return: QW_RET(TSDB_CODE_SUCCESS); } - -int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char* sql) { +int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char *sql) { int32_t code = 0; bool queryRsped = false; SSubplan *plan = NULL; @@ -556,7 +557,7 @@ int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char* sql) { ctx->needFetch = qwMsg->msgInfo.needFetch; ctx->msgType = qwMsg->msgType; - //QW_TASK_DLOGL("subplan json string, len:%d, %s", qwMsg->msgLen, qwMsg->msg); + // QW_TASK_DLOGL("subplan json string, len:%d, %s", qwMsg->msgLen, qwMsg->msg); code = qStringToSubplan(qwMsg->msg, &plan); if (TSDB_CODE_SUCCESS != code) { @@ -594,7 +595,7 @@ int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char* sql) { _return: taosMemoryFree(sql); - + input.code = code; input.msgType = qwMsg->msgType; code = qwHandlePostPhaseEvents(QW_FPARAMS(), QW_PHASE_POST_QUERY, &input, NULL); @@ -648,7 +649,7 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) { qwBuildAndSendFetchRsp(ctx->fetchType, &qwMsg->connInfo, rsp, dataLen, code); rsp = NULL; - + QW_TASK_DLOG("fetch rsp send, handle:%p, code:%x - %s, dataLen:%d", qwMsg->connInfo.handle, code, tstrerror(code), dataLen); } else { @@ -754,13 +755,13 @@ _return: if (code || rsp) { bool rsped = false; if (ctx) { - qwDbgSimulateRedirect(qwMsg, ctx, &rsped); + qwDbgSimulateRedirect(qwMsg, ctx, &rsped); qwDbgSimulateDead(QW_FPARAMS(), ctx, &rsped); } if (!rsped) { qwBuildAndSendFetchRsp(qwMsg->msgType + 1, &qwMsg->connInfo, rsp, dataLen, code); - QW_TASK_DLOG("%s send, handle:%p, code:%x - %s, dataLen:%d", TMSG_INFO(qwMsg->msgType + 1), qwMsg->connInfo.handle, code, tstrerror(code), - dataLen); + QW_TASK_DLOG("%s send, handle:%p, code:%x - %s, dataLen:%d", TMSG_INFO(qwMsg->msgType + 1), + qwMsg->connInfo.handle, code, tstrerror(code), dataLen); } } @@ -919,10 +920,11 @@ void qwProcessHbTimerEvent(void *param, void *tmrId) { uint64_t *sId = taosHashGetKey(pIter, NULL); QW_TLOG("cancel send hb to sch %" PRIx64 " cause of no connection handle", *sId); - if (sch->hbBrokenTs > 0 && ((currentMs - sch->hbBrokenTs) > QW_SCH_TIMEOUT_MSEC) && taosHashGetSize(sch->tasksHash) <= 0) { + if (sch->hbBrokenTs > 0 && ((currentMs - sch->hbBrokenTs) > QW_SCH_TIMEOUT_MSEC) && + taosHashGetSize(sch->tasksHash) <= 0) { taosArrayPush(pExpiredSch, sId); } - + pIter = taosHashIterate(mgmt->schHash, pIter); continue; } @@ -998,7 +1000,6 @@ _return: QW_RET(TSDB_CODE_SUCCESS); } - int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb) { if (NULL == qWorkerMgmt || pMsgCb->mgmt == NULL) { qError("invalid param to init qworker"); @@ -1119,12 +1120,12 @@ int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pSt QW_RET(TSDB_CODE_QRY_INVALID_INPUT); } - SQWorker *mgmt = (SQWorker *)qWorkerMgmt; + SQWorker *mgmt = (SQWorker *)qWorkerMgmt; SDataSinkStat sinkStat = {0}; - + dsDataSinkGetCacheSize(&sinkStat); pStat->cacheDataSize = sinkStat.cachedSize; - + pStat->queryProcessed = QW_STAT_GET(mgmt->stat.msgStat.queryProcessed); pStat->cqueryProcessed = QW_STAT_GET(mgmt->stat.msgStat.cqueryProcessed); pStat->fetchProcessed = QW_STAT_GET(mgmt->stat.msgStat.fetchProcessed); @@ -1139,6 +1140,3 @@ int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pSt return TSDB_CODE_SUCCESS; } - - - diff --git a/source/libs/scalar/inc/filterInt.h b/source/libs/scalar/inc/filterInt.h index 54e873065b20c6317d96c6a56240c7920371a663..e7695b2f04ea4fed2ebf9a77bf00717e1978003e 100644 --- a/source/libs/scalar/inc/filterInt.h +++ b/source/libs/scalar/inc/filterInt.h @@ -276,7 +276,7 @@ struct SFilterInfo { #define FILTER_CLR_FLAG(st, f) st &= (~f) #define SIMPLE_COPY_VALUES(dst, src) *((int64_t *)dst) = *((int64_t *)src) -#define FILTER_PACKAGE_UNIT_HASH_KEY(v, optr, idx1, idx2) do { char *_t = (char *)v; _t[0] = optr; *(uint32_t *)(_t + 1) = idx1; *(uint32_t *)(_t + 3) = idx2; } while (0) +#define FLT_PACKAGE_UNIT_HASH_KEY(v, op1, op2, lidx, ridx, ridx2) do { char *_t = (char *)(v); _t[0] = (op1); _t[1] = (op2); *(uint32_t *)(_t + 2) = (lidx); *(uint32_t *)(_t + 2 + sizeof(uint32_t)) = (ridx); } while (0) #define FILTER_GREATER(cr,sflag,eflag) ((cr > 0) || ((cr == 0) && (FILTER_GET_FLAG(sflag,RANGE_FLG_EXCLUDE) || FILTER_GET_FLAG(eflag,RANGE_FLG_EXCLUDE)))) #define FILTER_COPY_RA(dst, src) do { (dst)->sflag = (src)->sflag; (dst)->eflag = (src)->eflag; (dst)->s = (src)->s; (dst)->e = (src)->e; } while (0) @@ -350,7 +350,7 @@ struct SFilterInfo { extern bool filterDoCompare(__compar_fn_t func, uint8_t optr, void *left, void *right); extern __compar_fn_t filterGetCompFunc(int32_t type, int32_t optr); -extern OptrStr gOptrStr[]; +extern __compar_fn_t filterGetCompFuncEx(int32_t lType, int32_t rType, int32_t optr); #ifdef __cplusplus } diff --git a/source/libs/scalar/inc/sclInt.h b/source/libs/scalar/inc/sclInt.h index d423b92da7e83589aacc6d384c0e2cafa0949038..15e9026ddbc2eea8ad4e066519dd4bbea9767b7e 100644 --- a/source/libs/scalar/inc/sclInt.h +++ b/source/libs/scalar/inc/sclInt.h @@ -45,6 +45,9 @@ typedef struct SScalarCtx { #define SCL_IS_CONST_CALC(_ctx) (NULL == (_ctx)->pBlockList) //#define SCL_IS_NULL_VALUE_NODE(_node) ((QUERY_NODE_VALUE == nodeType(_node)) && (TSDB_DATA_TYPE_NULL == ((SValueNode *)_node)->node.resType.type) && (((SValueNode *)_node)->placeholderNo <= 0)) #define SCL_IS_NULL_VALUE_NODE(_node) ((QUERY_NODE_VALUE == nodeType(_node)) && (TSDB_DATA_TYPE_NULL == ((SValueNode *)_node)->node.resType.type)) +#define SCL_IS_COMPARISON_OPERATOR(_opType) ((_opType) >= OP_TYPE_GREATER_THAN && (_opType) < OP_TYPE_IS_NOT_UNKNOWN) +#define SCL_DOWNGRADE_DATETYPE(_type) ((_type) == TSDB_DATA_TYPE_BIGINT || TSDB_DATA_TYPE_DOUBLE == (_type) || (_type) == TSDB_DATA_TYPE_UBIGINT) +#define SCL_NO_NEED_CONVERT_COMPARISION(_ltype, _rtype, _optr) (IS_NUMERIC_TYPE(_ltype) && IS_NUMERIC_TYPE(_rtype) && ((_optr) >= OP_TYPE_GREATER_THAN && (_optr) <= OP_TYPE_NOT_EQUAL)) #define sclFatal(...) qFatal(__VA_ARGS__) #define sclError(...) qError(__VA_ARGS__) diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 1664a4d61283e0e393862de666bb90990355227a..9e676354374fce6c2e733ac8d42c45baef9bada8 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -24,46 +24,6 @@ #include "ttime.h" #include "functionMgt.h" -OptrStr gOptrStr[] = { - {0, "invalid"}, - {OP_TYPE_ADD, "+"}, - {OP_TYPE_SUB, "-"}, - {OP_TYPE_MULTI, "*"}, - {OP_TYPE_DIV, "/"}, - {OP_TYPE_REM, "%"}, - {OP_TYPE_MINUS, "minus"}, - {OP_TYPE_ASSIGN, "assign"}, - // bit operator - {OP_TYPE_BIT_AND, "&"}, - {OP_TYPE_BIT_OR, "|"}, - - // comparison operator - {OP_TYPE_GREATER_THAN, ">"}, - {OP_TYPE_GREATER_EQUAL, ">="}, - {OP_TYPE_LOWER_THAN, "<"}, - {OP_TYPE_LOWER_EQUAL, "<="}, - {OP_TYPE_EQUAL, "=="}, - {OP_TYPE_NOT_EQUAL, "!="}, - {OP_TYPE_IN, "in"}, - {OP_TYPE_NOT_IN, "not in"}, - {OP_TYPE_LIKE, "like"}, - {OP_TYPE_NOT_LIKE, "not like"}, - {OP_TYPE_MATCH, "match"}, - {OP_TYPE_NMATCH, "nmatch"}, - {OP_TYPE_IS_NULL, "is null"}, - {OP_TYPE_IS_NOT_NULL, "not null"}, - {OP_TYPE_IS_TRUE, "is true"}, - {OP_TYPE_IS_FALSE, "is false"}, - {OP_TYPE_IS_UNKNOWN, "is unknown"}, - {OP_TYPE_IS_NOT_TRUE, "not true"}, - {OP_TYPE_IS_NOT_FALSE, "not false"}, - {OP_TYPE_IS_NOT_UNKNOWN, "not unknown"}, - - // json operator - {OP_TYPE_JSON_GET_VALUE, "->"}, - {OP_TYPE_JSON_CONTAINS, "json contains"} -}; - bool filterRangeCompGi (const void *minv, const void *maxv, const void *minr, const void *maxr, __compar_fn_t cfunc) { int32_t result = cfunc(maxv, minr); return result >= 0; @@ -172,6 +132,77 @@ __compar_fn_t gDataCompare[] = {compareInt32Val, compareInt8Val, compareInt16Val compareChkNotInString, compareStrPatternNotMatch, compareWStrPatternNotMatch }; +__compar_fn_t gInt8SignCompare[] = { + compareInt8Val, compareInt8Int16, compareInt8Int32, compareInt8Int64, compareInt8Float, compareInt8Double +}; +__compar_fn_t gInt8UsignCompare[] = { + compareInt8Uint8, compareInt8Uint16, compareInt8Uint32, compareInt8Uint64 +}; + +__compar_fn_t gInt16SignCompare[] = { + compareInt16Int8, compareInt16Val, compareInt16Int32, compareInt16Int64, compareInt16Float, compareInt16Double +}; +__compar_fn_t gInt16UsignCompare[] = { + compareInt16Uint8, compareInt16Uint16, compareInt16Uint32, compareInt16Uint64 +}; + +__compar_fn_t gInt32SignCompare[] = { + compareInt32Int8, compareInt32Int16, compareInt32Val, compareInt32Int64, compareInt32Float, compareInt32Double +}; +__compar_fn_t gInt32UsignCompare[] = { + compareInt32Uint8, compareInt32Uint16, compareInt32Uint32, compareInt32Uint64 +}; + +__compar_fn_t gInt64SignCompare[] = { + compareInt64Int8, compareInt64Int16, compareInt64Int32, compareInt64Val, compareInt64Float, compareInt64Double +}; +__compar_fn_t gInt64UsignCompare[] = { + compareInt64Uint8, compareInt64Uint16, compareInt64Uint32, compareInt64Uint64 +}; + +__compar_fn_t gFloatSignCompare[] = { + compareFloatInt8, compareFloatInt16, compareFloatInt32, compareFloatInt64, compareFloatVal, compareFloatDouble +}; +__compar_fn_t gFloatUsignCompare[] = { + compareFloatUint8, compareFloatUint16, compareFloatUint32, compareFloatUint64 +}; + +__compar_fn_t gDoubleSignCompare[] = { + compareDoubleInt8, compareDoubleInt16, compareDoubleInt32, compareDoubleInt64, compareDoubleFloat, compareDoubleVal +}; +__compar_fn_t gDoubleUsignCompare[] = { + compareDoubleUint8, compareDoubleUint16, compareDoubleUint32, compareDoubleUint64 +}; + +__compar_fn_t gUint8SignCompare[] = { + compareUint8Int8, compareUint8Int16, compareUint8Int32, compareUint8Int64, compareUint8Float, compareUint8Double +}; +__compar_fn_t gUint8UsignCompare[] = { + compareUint8Val, compareUint8Uint16, compareUint8Uint32, compareUint8Uint64 +}; + +__compar_fn_t gUint16SignCompare[] = { + compareUint16Int8, compareUint16Int16, compareUint16Int32, compareUint16Int64, compareUint16Float, compareUint16Double +}; +__compar_fn_t gUint16UsignCompare[] = { + compareUint16Uint8, compareUint16Val, compareUint16Uint32, compareUint16Uint64 +}; + +__compar_fn_t gUint32SignCompare[] = { + compareUint32Int8, compareUint32Int16, compareUint32Int32, compareUint32Int64, compareUint32Float, compareUint32Double +}; +__compar_fn_t gUint32UsignCompare[] = { + compareUint32Uint8, compareUint32Uint16, compareUint32Val, compareUint32Uint64 +}; + +__compar_fn_t gUint64SignCompare[] = { + compareUint64Int8, compareUint64Int16, compareUint64Int32, compareUint64Int64, compareUint64Float, compareUint64Double +}; +__compar_fn_t gUint64UsignCompare[] = { + compareUint64Uint8, compareUint64Uint16, compareUint64Uint32, compareUint64Val +}; + + int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { int8_t comparFn = 0; @@ -297,6 +328,93 @@ __compar_fn_t filterGetCompFunc(int32_t type, int32_t optr) { return gDataCompare[filterGetCompFuncIdx(type, optr)]; } +__compar_fn_t filterGetCompFuncEx(int32_t lType, int32_t rType, int32_t optr) { + switch (lType) { + case TSDB_DATA_TYPE_TINYINT: { + if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) { + return gInt8SignCompare[rType - TSDB_DATA_TYPE_TINYINT]; + } else { + return gInt8UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT]; + } + break; + } + case TSDB_DATA_TYPE_SMALLINT: { + if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) { + return gInt16SignCompare[rType - TSDB_DATA_TYPE_TINYINT]; + } else { + return gInt16UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT]; + } + break; + } + case TSDB_DATA_TYPE_INT: { + if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) { + return gInt32SignCompare[rType - TSDB_DATA_TYPE_TINYINT]; + } else { + return gInt32UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT]; + } + break; + } + case TSDB_DATA_TYPE_BIGINT: { + if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) { + return gInt64SignCompare[rType - TSDB_DATA_TYPE_TINYINT]; + } else { + return gInt64UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT]; + } + break; + } + case TSDB_DATA_TYPE_FLOAT: { + if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) { + return gFloatSignCompare[rType - TSDB_DATA_TYPE_TINYINT]; + } else { + return gFloatUsignCompare[rType - TSDB_DATA_TYPE_UTINYINT]; + } + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) { + return gDoubleSignCompare[rType - TSDB_DATA_TYPE_TINYINT]; + } else { + return gDoubleUsignCompare[rType - TSDB_DATA_TYPE_UTINYINT]; + } + break; + } + case TSDB_DATA_TYPE_UTINYINT: { + if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) { + return gUint8SignCompare[rType - TSDB_DATA_TYPE_TINYINT]; + } else { + return gUint8UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT]; + } + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) { + return gUint16SignCompare[rType - TSDB_DATA_TYPE_TINYINT]; + } else { + return gUint16UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT]; + } + break; + } + case TSDB_DATA_TYPE_UINT: { + if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) { + return gUint32SignCompare[rType - TSDB_DATA_TYPE_TINYINT]; + } else { + return gUint32UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT]; + } + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) { + return gUint64SignCompare[rType - TSDB_DATA_TYPE_TINYINT]; + } else { + return gUint64UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT]; + } + break; + } + default: + break; + } + return NULL; +} static FORCE_INLINE int32_t filterCompareGroupCtx(const void *pLeft, const void *pRight) { SFilterGroupCtx *left = *((SFilterGroupCtx**)pLeft), *right = *((SFilterGroupCtx**)pRight); @@ -950,14 +1068,14 @@ int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *f return TSDB_CODE_SUCCESS; } -int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint32_t *uidx) { +int32_t filterAddUnitImpl(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint8_t optr2, SFilterFieldId *right2, uint32_t *uidx) { if (FILTER_GET_FLAG(info->options, FLT_OPTION_NEED_UNIQE)) { if (info->pctx.unitHash == NULL) { info->pctx.unitHash = taosHashInit(FILTER_DEFAULT_GROUP_SIZE * FILTER_DEFAULT_UNIT_SIZE, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, false); } else { - int64_t v = 0; - FILTER_PACKAGE_UNIT_HASH_KEY(&v, optr, left->idx, right ? right->idx : -1); - void *hu = taosHashGet(info->pctx.unitHash, &v, sizeof(v)); + char v[14] = {0}; + FLT_PACKAGE_UNIT_HASH_KEY(&v, optr, optr2, left->idx, (right ? right->idx : -1), (right2 ? right2->idx : -1)); + void *hu = taosHashGet(info->pctx.unitHash, v, sizeof(v)); if (hu) { *uidx = *(uint32_t *)hu; return TSDB_CODE_SUCCESS; @@ -979,14 +1097,18 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi if (right) { u->right = *right; } - + u->compare.optr2 = optr2; + if (right2) { + u->right2 = *right2; + } + if (u->right.type == FLD_TYPE_VALUE) { SFilterField *val = FILTER_UNIT_RIGHT_FIELD(info, u); assert(FILTER_GET_FLAG(val->flag, FLD_TYPE_VALUE)); } else { int32_t paramNum = scalarGetOperatorParamNum(optr); if (1 != paramNum) { - fltError("invalid right field in unit, operator:%s, rightType:%d", gOptrStr[optr].str, u->right.type); + fltError("invalid right field in unit, operator:%s, rightType:%d", operatorTypeStr(optr), u->right.type); return TSDB_CODE_QRY_APP_ERROR; } } @@ -1000,9 +1122,9 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi *uidx = info->unitNum; if (FILTER_GET_FLAG(info->options, FLT_OPTION_NEED_UNIQE)) { - int64_t v = 0; - FILTER_PACKAGE_UNIT_HASH_KEY(&v, optr, left->idx, right ? right->idx : -1); - taosHashPut(info->pctx.unitHash, &v, sizeof(v), uidx, sizeof(*uidx)); + char v[14] = {0}; + FLT_PACKAGE_UNIT_HASH_KEY(&v, optr, optr2, left->idx, (right ? right->idx : -1), (right2 ? right2->idx : -1)); + taosHashPut(info->pctx.unitHash, v, sizeof(v), uidx, sizeof(*uidx)); } ++info->unitNum; @@ -1011,6 +1133,9 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi } +int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint32_t *uidx) { + return filterAddUnitImpl(info, optr, left, right, 0, NULL, uidx); +} int32_t filterAddUnitToGroup(SFilterGroup *group, uint32_t unitIdx) { if (group->unitNum >= group->unitSize) { @@ -1187,8 +1312,8 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan SIMPLE_COPY_VALUES(data2, &ra->e); filterAddField(dst, NULL, &data2, FLD_TYPE_VALUE, &right2, tDataTypes[type].bytes, true); - filterAddUnit(dst, FILTER_GET_FLAG(ra->sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, &uidx); - filterAddUnitRight(dst, FILTER_GET_FLAG(ra->eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, uidx); + filterAddUnitImpl(dst, FILTER_GET_FLAG(ra->sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, + FILTER_GET_FLAG(ra->eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, &uidx); filterAddUnitToGroup(g, uidx); return TSDB_CODE_SUCCESS; } @@ -1262,8 +1387,8 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan SIMPLE_COPY_VALUES(data2, &r->ra.e); filterAddField(dst, NULL, &data2, FLD_TYPE_VALUE, &right2, tDataTypes[type].bytes, true); - filterAddUnit(dst, FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, &uidx); - filterAddUnitRight(dst, FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, uidx); + filterAddUnitImpl(dst, FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, + FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, &uidx); filterAddUnitToGroup(g, uidx); } @@ -1517,7 +1642,7 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options) SFilterField *left = FILTER_UNIT_LEFT_FIELD(info, unit); SColumnNode *refNode = (SColumnNode *)left->desc; if (unit->compare.optr >= 0 && unit->compare.optr <= OP_TYPE_JSON_CONTAINS){ - len = sprintf(str, "UNIT[%d] => [%d][%d] %s [", i, refNode->dataBlockId, refNode->slotId, gOptrStr[unit->compare.optr].str); + len = sprintf(str, "UNIT[%d] => [%d][%d] %s [", i, refNode->dataBlockId, refNode->slotId, operatorTypeStr(unit->compare.optr)); } if (unit->right.type == FLD_TYPE_VALUE && FILTER_UNIT_OPTR(unit) != OP_TYPE_IN) { @@ -1536,7 +1661,7 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options) if (unit->compare.optr2) { strcat(str, " && "); if (unit->compare.optr2 >= 0 && unit->compare.optr2 <= OP_TYPE_JSON_CONTAINS){ - sprintf(str + strlen(str), "[%d][%d] %s [", refNode->dataBlockId, refNode->slotId, gOptrStr[unit->compare.optr2].str); + sprintf(str + strlen(str), "[%d][%d] %s [", refNode->dataBlockId, refNode->slotId, operatorTypeStr(unit->compare.optr2)); } if (unit->right2.type == FLD_TYPE_VALUE && FILTER_UNIT_OPTR(unit) != OP_TYPE_IN) { @@ -2113,6 +2238,44 @@ int32_t filterMergeGroupUnits(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t return TSDB_CODE_SUCCESS; } +bool filterIsSameUnits(SFilterColInfo* pCol1, SFilterColInfo* pCol2) { + if (pCol1->type != pCol2->type) { + return false; + } + + if (RANGE_TYPE_MR_CTX == pCol1->type) { + SFilterRangeCtx* pCtx1 = (SFilterRangeCtx*)pCol1->info; + SFilterRangeCtx* pCtx2 = (SFilterRangeCtx*)pCol2->info; + + if ((pCtx1->isnull != pCtx2->isnull) || (pCtx1->notnull != pCtx2->notnull) || (pCtx1->isrange != pCtx2->isrange)) { + return false; + } + + + SFilterRangeNode* pNode1 = pCtx1->rs; + SFilterRangeNode* pNode2 = pCtx2->rs; + + while (true) { + if (NULL == pNode1 && NULL == pNode2) { + break; + } + + if (NULL == pNode1 || NULL == pNode2) { + return false; + } + + if (pNode1->ra.s != pNode2->ra.s || pNode1->ra.e != pNode2->ra.e || pNode1->ra.sflag != pNode2->ra.sflag || pNode1->ra.eflag != pNode2->ra.eflag) { + return false; + } + + pNode1 = pNode1->next; + pNode2 = pNode2->next; + } + } + + return true; +} + void filterCheckColConflict(SFilterGroupCtx* gRes1, SFilterGroupCtx* gRes2, bool *conflict) { uint32_t idx1 = 0, idx2 = 0, m = 0, n = 0; bool equal = false; @@ -2138,6 +2301,11 @@ void filterCheckColConflict(SFilterGroupCtx* gRes1, SFilterGroupCtx* gRes2, bool return; } + if (!filterIsSameUnits(&gRes1->colInfo[idx1], &gRes2->colInfo[idx2])) { + *conflict = true; + return; + } + // for long in operation if (gRes1->colInfo[idx1].optr == OP_TYPE_EQUAL && gRes2->colInfo[idx2].optr == OP_TYPE_EQUAL) { SFilterRangeCtx* ctx = gRes1->colInfo[idx1].info; @@ -2751,17 +2919,22 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3 for (uint32_t g = 0; g < info->groupNum; ++g) { SFilterGroup *group = &info->groups[g]; + // first is block unint num for a group, following append unitNum blkUnitIdx for this group *unitNum = group->unitNum; all = 0; empty = 0; + // save group idx start pointer + uint32_t * pGroupIdx = unitIdx; for (uint32_t u = 0; u < group->unitNum; ++u) { uint32_t uidx = group->unitIdxs[u]; if (info->blkUnitRes[uidx] == 1) { + // blkUnitRes == 1 is always true, so need not compare every time, delete this unit from group --(*unitNum); all = 1; continue; } else if (info->blkUnitRes[uidx] == -1) { + // blkUnitRes == -1 is alwary false, so in group is alwary false, need delete this group from blkGroupNum *unitNum = 0; empty = 1; break; @@ -2771,6 +2944,9 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3 } if (*unitNum == 0) { + // if unit num is zero, reset unitIdx to start on this group + unitIdx = pGroupIdx; + --info->blkGroupNum; assert(empty || all); diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index d0c5a76f4b03e38851e5810b2143fa9b65cb782f..cd1f6624bdf83e4fe143c1a648e5e30947bcdd65 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -9,6 +9,7 @@ #include "scalar.h" #include "tudf.h" #include "ttime.h" +#include "tcompare.h" int32_t scalarGetOperatorParamNum(EOperatorType type) { if (OP_TYPE_IS_NULL == type || OP_TYPE_IS_NOT_NULL == type || OP_TYPE_IS_TRUE == type || OP_TYPE_IS_NOT_TRUE == type @@ -219,6 +220,82 @@ void sclFreeParamList(SScalarParam *param, int32_t paramNum) { taosMemoryFree(param); } +void sclDowngradeValueType(SValueNode *valueNode) { + switch (valueNode->node.resType.type) { + case TSDB_DATA_TYPE_BIGINT: { + int8_t i8 = valueNode->datum.i; + if (i8 == valueNode->datum.i) { + valueNode->node.resType.type = TSDB_DATA_TYPE_TINYINT; + *(int8_t*)&valueNode->typeData = i8; + break; + } + int16_t i16 = valueNode->datum.i; + if (i16 == valueNode->datum.i) { + valueNode->node.resType.type = TSDB_DATA_TYPE_SMALLINT; + *(int16_t*)&valueNode->typeData = i16; + break; + } + int32_t i32 = valueNode->datum.i; + if (i32 == valueNode->datum.i) { + valueNode->node.resType.type = TSDB_DATA_TYPE_INT; + *(int32_t*)&valueNode->typeData = i32; + break; + } + break; + } + case TSDB_DATA_TYPE_UBIGINT:{ + uint8_t u8 = valueNode->datum.i; + if (u8 == valueNode->datum.i) { + int8_t i8 = valueNode->datum.i; + if (i8 == valueNode->datum.i) { + valueNode->node.resType.type = TSDB_DATA_TYPE_TINYINT; + *(int8_t*)&valueNode->typeData = i8; + } else { + valueNode->node.resType.type = TSDB_DATA_TYPE_UTINYINT; + *(uint8_t*)&valueNode->typeData = u8; + } + break; + } + uint16_t u16 = valueNode->datum.i; + if (u16 == valueNode->datum.i) { + int16_t i16 = valueNode->datum.i; + if (i16 == valueNode->datum.i) { + valueNode->node.resType.type = TSDB_DATA_TYPE_SMALLINT; + *(int16_t*)&valueNode->typeData = i16; + } else { + valueNode->node.resType.type = TSDB_DATA_TYPE_USMALLINT; + *(uint16_t*)&valueNode->typeData = u16; + } + break; + } + uint32_t u32 = valueNode->datum.i; + if (u32 == valueNode->datum.i) { + int32_t i32 = valueNode->datum.i; + if (i32 == valueNode->datum.i) { + valueNode->node.resType.type = TSDB_DATA_TYPE_INT; + *(int32_t*)&valueNode->typeData = i32; + } else { + valueNode->node.resType.type = TSDB_DATA_TYPE_UINT; + *(uint32_t*)&valueNode->typeData = u32; + } + break; + } + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + float f = valueNode->datum.d; + if (FLT_EQUAL(f, valueNode->datum.d)) { + valueNode->node.resType.type = TSDB_DATA_TYPE_FLOAT; + *(float*)&valueNode->typeData = f; + break; + } + break; + } + default: + break; + } +} + int32_t sclInitParam(SNode* node, SScalarParam *param, SScalarCtx *ctx, int32_t *rowNum) { switch (nodeType(node)) { case QUERY_NODE_LEFT_VALUE: { @@ -292,6 +369,9 @@ int32_t sclInitParam(SNode* node, SScalarParam *param, SScalarCtx *ctx, int32_t } SColumnInfoData *columnData = (SColumnInfoData *)taosArrayGet(block->pDataBlock, ref->slotId); +#if TAG_FILTER_DEBUG + qDebug("tagfilter column info, slotId:%d, colId:%d, type:%d", ref->slotId, columnData->info.colId, columnData->info.type); +#endif param->numOfRows = block->info.rows; param->columnData = columnData; break; @@ -672,6 +752,10 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) { return DEAL_RES_ERROR; } } + + if (SCL_IS_COMPARISON_OPERATOR(node->opType) && SCL_DOWNGRADE_DATETYPE(valueNode->node.resType.type)) { + sclDowngradeValueType(valueNode); + } } if (node->pRight && (QUERY_NODE_VALUE == nodeType(node->pRight))) { @@ -689,6 +773,10 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) { return DEAL_RES_ERROR; } } + + if (SCL_IS_COMPARISON_OPERATOR(node->opType) && SCL_DOWNGRADE_DATETYPE(valueNode->node.resType.type)) { + sclDowngradeValueType(valueNode); + } } if (node->pRight && (QUERY_NODE_NODE_LIST == nodeType(node->pRight))) { @@ -758,7 +846,9 @@ EDealRes sclRewriteFunction(SNode** pNode, SScalarCtx *ctx) { res->datum.p = taosMemoryCalloc(len, 1); memcpy(res->datum.p, output.columnData->pData, len); } else if (IS_VAR_DATA_TYPE(type)) { - res->datum.p = taosMemoryCalloc(res->node.resType.bytes + VARSTR_HEADER_SIZE + 1, 1); + //res->datum.p = taosMemoryCalloc(res->node.resType.bytes + VARSTR_HEADER_SIZE + 1, 1); + res->datum.p = taosMemoryCalloc(varDataTLen(output.columnData->pData), 1); + res->node.resType.bytes = varDataTLen(output.columnData->pData); memcpy(res->datum.p, output.columnData->pData, varDataTLen(output.columnData->pData)); } else { nodesSetValueNodeValue(res, output.columnData->pData); diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 2d889dd925e893c0684a3317c4fe2a6e3eb1d671..4ead1147e47e8777a422cd8f63a74d4017a368a5 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -642,7 +642,6 @@ static int32_t doTrimFunction(SScalarParam *pInput, int32_t inputNum, SScalarPar int32_t charLen = (type == TSDB_DATA_TYPE_VARCHAR) ? len : len / TSDB_NCHAR_SIZE; trimFn(input, output, type, charLen); - varDataSetLen(output, len); colDataAppend(pOutputData, i, output, false); output += varDataTLen(output); } diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index aaa70ef5ae5f8ab00ce88b56433885cd00004893..a003315fcabeab38f49ae3a6056e25dff10e4e16 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -909,11 +909,11 @@ int32_t vectorConvertImpl(const SScalarParam* pIn, SScalarParam* pOut, int32_t* int8_t gConvertTypes[TSDB_DATA_TYPE_BLOB+1][TSDB_DATA_TYPE_BLOB+1] = { /* NULL BOOL TINY SMAL INT BIG FLOA DOUB VARC TIME NCHA UTIN USMA UINT UBIG JSON VARB DECI BLOB */ /*NULL*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -/*BOOL*/ 0, 0, 0, 3, 4, 5, 6, 7, 7, 9, 7, 0, 12, 13, 14, 0, 7, 0, 0, +/*BOOL*/ 0, 0, 2, 3, 4, 5, 6, 7, 7, 9, 7, 11, 12, 13, 14, 0, 7, 0, 0, /*TINY*/ 0, 0, 0, 3, 4, 5, 6, 7, 7, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0, /*SMAL*/ 0, 0, 0, 0, 4, 5, 6, 7, 7, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0, /*INT */ 0, 0, 0, 0, 0, 5, 6, 7, 7, 9, 7, 4, 4, 5, 7, 0, 7, 0, 0, -/*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 7, 0, 7, 5, 5, 5, 7, 0, 7, 0, 0, +/*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 7, 9, 7, 5, 5, 5, 7, 0, 7, 0, 0, /*FLOA*/ 0, 0, 0, 0, 0, 0, 0, 7, 7, 6, 7, 6, 6, 6, 6, 0, 7, 0, 0, /*DOUB*/ 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 0, 7, 0, 0, /*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 7, 7, 7, 0, 0, 0, 0, @@ -1681,10 +1681,14 @@ void vectorBitOr(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, void vectorCompareImpl(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord, int32_t optr) { int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : TMAX(pLeft->numOfRows, pRight->numOfRows) - 1; int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; - - __compar_fn_t fp = filterGetCompFunc(GET_PARAM_TYPE(pLeft), optr); - if(terrno != TSDB_CODE_SUCCESS){ - return; + int32_t lType = GET_PARAM_TYPE(pLeft); + int32_t rType = GET_PARAM_TYPE(pRight); + __compar_fn_t fp = NULL; + + if (lType == rType) { + fp = filterGetCompFunc(lType, optr); + } else { + fp = filterGetCompFuncEx(lType, rType, optr); } pOut->numOfRows = TMAX(pLeft->numOfRows, pRight->numOfRows); @@ -1716,22 +1720,26 @@ void vectorCompareImpl(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam * void vectorCompare(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord, int32_t optr) { SScalarParam pLeftOut = {0}; SScalarParam pRightOut = {0}; - - vectorConvert(pLeft, pRight, &pLeftOut, &pRightOut); - SScalarParam *param1 = NULL; SScalarParam *param2 = NULL; - if (pLeftOut.columnData != NULL) { - param1 = &pLeftOut; - } else { + if (SCL_NO_NEED_CONVERT_COMPARISION(GET_PARAM_TYPE(pLeft), GET_PARAM_TYPE(pRight), optr)) { param1 = pLeft; - } - - if (pRightOut.columnData != NULL) { - param2 = &pRightOut; - } else { param2 = pRight; + } else { + vectorConvert(pLeft, pRight, &pLeftOut, &pRightOut); + + if (pLeftOut.columnData != NULL) { + param1 = &pLeftOut; + } else { + param1 = pLeft; + } + + if (pRightOut.columnData != NULL) { + param2 = &pRightOut; + } else { + param2 = pRight; + } } vectorCompareImpl(param1, param2, pOut, _ord, optr); diff --git a/source/libs/scalar/test/scalar/scalarTests.cpp b/source/libs/scalar/test/scalar/scalarTests.cpp index 9b40f0a4659ee9a8a85131c9daf017b8c3b17ce2..7229fdec386b7df5bb626ea0524144d4f42dd995 100644 --- a/source/libs/scalar/test/scalar/scalarTests.cpp +++ b/source/libs/scalar/test/scalar/scalarTests.cpp @@ -1089,16 +1089,16 @@ void makeCalculate(void *json, void *key, int32_t rightType, void *rightData, do }else if(opType == OP_TYPE_ADD || opType == OP_TYPE_SUB || opType == OP_TYPE_MULTI || opType == OP_TYPE_DIV || opType == OP_TYPE_REM || opType == OP_TYPE_MINUS){ - printf("op:%s,1result:%f,except:%f\n", gOptrStr[opType].str, *((double *)colDataGetData(column, 0)), exceptValue); + printf("op:%s,1result:%f,except:%f\n", operatorTypeStr(opType), *((double *)colDataGetData(column, 0)), exceptValue); ASSERT_TRUE(fabs(*((double *)colDataGetData(column, 0)) - exceptValue) < 0.0001); }else if(opType == OP_TYPE_BIT_AND || opType == OP_TYPE_BIT_OR){ - printf("op:%s,2result:%" PRId64 ",except:%f\n", gOptrStr[opType].str, *((int64_t *)colDataGetData(column, 0)), exceptValue); + printf("op:%s,2result:%" PRId64 ",except:%f\n", operatorTypeStr(opType), *((int64_t *)colDataGetData(column, 0)), exceptValue); ASSERT_EQ(*((int64_t *)colDataGetData(column, 0)), exceptValue); }else if(opType == OP_TYPE_GREATER_THAN || opType == OP_TYPE_GREATER_EQUAL || opType == OP_TYPE_LOWER_THAN || opType == OP_TYPE_LOWER_EQUAL || opType == OP_TYPE_EQUAL || opType == OP_TYPE_NOT_EQUAL || opType == OP_TYPE_IS_NULL || opType == OP_TYPE_IS_NOT_NULL || opType == OP_TYPE_IS_TRUE || opType == OP_TYPE_LIKE || opType == OP_TYPE_NOT_LIKE || opType == OP_TYPE_MATCH || opType == OP_TYPE_NMATCH){ - printf("op:%s,3result:%d,except:%f\n", gOptrStr[opType].str, *((bool *)colDataGetData(column, 0)), exceptValue); + printf("op:%s,3result:%d,except:%f\n", operatorTypeStr(opType), *((bool *)colDataGetData(column, 0)), exceptValue); ASSERT_EQ(*((bool *)colDataGetData(column, 0)), exceptValue); } diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h index 1b3d75f33b57e0a20b0d91d7724a7a0684eef765..957fd46ba5a767858a3bb5bbe50142b4f1c1ce47 100644 --- a/source/libs/scheduler/inc/schInt.h +++ b/source/libs/scheduler/inc/schInt.h @@ -60,8 +60,7 @@ typedef enum { #define SCH_DEFAULT_TASK_TIMEOUT_USEC 10000000 #define SCH_MAX_TASK_TIMEOUT_USEC 60000000 #define SCH_DEFAULT_MAX_RETRY_NUM 6 - -#define SCH_ASYNC_LAUNCH_TASK 0 +#define SCH_MIN_AYSNC_EXEC_NUM 3 typedef struct SSchDebug { bool lockEnable; @@ -284,7 +283,7 @@ typedef struct SSchJob { } SSchJob; typedef struct SSchTaskCtx { - SSchJob *pJob; + int64_t jobRid; SSchTask *pTask; } SSchTaskCtx; diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index 8c9003a9b2af371ebdb50620f53d382c04609a03..5a64aaaebb3860d2c6729ac8eb1e00be0cc9cda1 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -20,7 +20,7 @@ #include "tmsg.h" #include "tref.h" #include "trpc.h" - +// clang-format off int32_t schValidateRspMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgType) { int32_t lastMsgType = pTask->lastMsgType; int32_t taskStatus = SCH_GET_TASK_STATUS(pTask); @@ -102,15 +102,30 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa tDecoderInit(&coder, msg, msgSize); code = tDecodeSVCreateTbBatchRsp(&coder, &batchRsp); if (TSDB_CODE_SUCCESS == code && batchRsp.nRsps > 0) { + SCH_LOCK(SCH_WRITE, &pJob->resLock); + if (NULL == pJob->execRes.res) { + pJob->execRes.res = taosArrayInit(batchRsp.nRsps, POINTER_BYTES); + pJob->execRes.msgType = TDMT_VND_CREATE_TABLE; + } + for (int32_t i = 0; i < batchRsp.nRsps; ++i) { SVCreateTbRsp *rsp = batchRsp.pRsps + i; + if (rsp->pMeta) { + taosArrayPush((SArray*)pJob->execRes.res, &rsp->pMeta); + } + if (TSDB_CODE_SUCCESS != rsp->code) { code = rsp->code; - tDecoderClear(&coder); - SCH_ERR_JRET(code); } } + SCH_UNLOCK(SCH_WRITE, &pJob->resLock); + + if (taosArrayGetSize((SArray*)pJob->execRes.res) <= 0) { + taosArrayDestroy((SArray*)pJob->execRes.res); + pJob->execRes.res = NULL; + } } + tDecoderClear(&coder); SCH_ERR_JRET(code); } @@ -402,7 +417,7 @@ int32_t schHandleDropCallback(void *param, SDataBuf *pMsg, int32_t code) { qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 " drop task rsp received, code:0x%x", pParam->queryId, pParam->taskId, code); if (pMsg) { - taosMemoryFree(pMsg->pData); + taosMemoryFree(pMsg->pData); } return TSDB_CODE_SUCCESS; } @@ -415,7 +430,7 @@ int32_t schHandleLinkBrokenCallback(void *param, SDataBuf *pMsg, int32_t code) { if (head->isHbParam) { taosMemoryFree(pMsg->pData); - + SSchHbCallbackParam *hbParam = (SSchHbCallbackParam *)param; SSchTrans trans = {.pTrans = hbParam->pTrans, .pHandle = NULL}; SCH_ERR_RET(schUpdateHbConnection(&hbParam->nodeEpId, &trans)); @@ -1104,7 +1119,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, #if 1 SSchTrans trans = {.pTrans = pJob->conn.pTrans, .pHandle = SCH_GET_TASK_HANDLE(pTask)}; - schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL)); + code = schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL)); msg = NULL; SCH_ERR_JRET(code); @@ -1114,7 +1129,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, #else if (TDMT_VND_SUBMIT != msgType) { SSchTrans trans = {.pTrans = pJob->conn.pTrans, .pHandle = SCH_GET_TASK_HANDLE(pTask)}; - schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL)); + code = schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL)); msg = NULL; SCH_ERR_JRET(code); @@ -1136,3 +1151,4 @@ _return: taosMemoryFreeClear(msg); SCH_RET(code); } +// clang-format on diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index 612b908d4196856d45d7100a48e0fc38e291fb86..9cab39c30122072207daa9e9639ab92645fc1633 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -821,7 +821,13 @@ int32_t schProcessOnTaskStatusRsp(SQueryNodeEpId *pEpId, SArray *pStatusList) { int32_t schLaunchTaskImpl(void *param) { SSchTaskCtx *pCtx = (SSchTaskCtx *)param; - SSchJob *pJob = pCtx->pJob; + SSchJob *pJob = schAcquireJob(pCtx->jobRid); + if (NULL == pJob) { + taosMemoryFree(param); + qDebug("job refId 0x%" PRIx64 " already not exist", pCtx->jobRid); + SCH_RET(TSDB_CODE_SCH_JOB_IS_DROPPING); + } + SSchTask *pTask = pCtx->pTask; int8_t status = 0; int32_t code = 0; @@ -871,14 +877,16 @@ _return: taosMemoryFree(param); -#if SCH_ASYNC_LAUNCH_TASK - if (code) { - code = schProcessOnTaskFailure(pJob, pTask, code); - } - if (code) { - code = schHandleJobFailure(pJob, code); + if (pJob->taskNum >= SCH_MIN_AYSNC_EXEC_NUM) { + if (code) { + code = schProcessOnTaskFailure(pJob, pTask, code); + } + if (code) { + code = schHandleJobFailure(pJob, code); + } } -#endif + + schReleaseJob(pJob->refId); SCH_RET(code); } @@ -890,15 +898,15 @@ int32_t schAsyncLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) { SCH_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } - param->pJob = pJob; + param->jobRid = pJob->refId; param->pTask = pTask; -#if SCH_ASYNC_LAUNCH_TASK - taosAsyncExec(schLaunchTaskImpl, param, NULL); -#else - SCH_ERR_RET(schLaunchTaskImpl(param)); -#endif - + if (pJob->taskNum >= SCH_MIN_AYSNC_EXEC_NUM) { + taosAsyncExec(schLaunchTaskImpl, param, NULL); + } else { + SCH_ERR_RET(schLaunchTaskImpl(param)); + } + return TSDB_CODE_SUCCESS; } diff --git a/source/libs/stream/inc/streamInc.h b/source/libs/stream/inc/streamInc.h index 3776cb261f5f19cf291f66c28efc57a54489a592..6e30eeaa8643273da0669159571caad793e8b034 100644 --- a/source/libs/stream/inc/streamInc.h +++ b/source/libs/stream/inc/streamInc.h @@ -32,7 +32,6 @@ typedef struct { static SStreamGlobalEnv streamEnv; -int32_t streamExec(SStreamTask* pTask); int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch); int32_t streamDispatch(SStreamTask* pTask); diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index 6da7d4fd59028ac09a4493dfcd311769c619d52d..d6e87c27366da27dda96a41c7a9d2fda92c652a9 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -185,7 +185,9 @@ int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, S tFreeStreamDispatchReq(pReq); if (exec) { - streamTryExec(pTask); + if (streamTryExec(pTask) < 0) { + return -1; + } if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) { streamDispatch(pTask); @@ -221,7 +223,9 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp) { } int32_t streamProcessRunReq(SStreamTask* pTask) { - streamTryExec(pTask); + if (streamTryExec(pTask) < 0) { + return -1; + } if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) { streamDispatch(pTask); diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index c78ff0756f22e50e1f64a3f02526a04376eb9b08..9d4010f60e5fcb222e235181a2ce12b8d4dc4102 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -358,7 +358,7 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat FAIL_SHUFFLE_DISPATCH: if (pReqs) { for (int32_t i = 0; i < vgSz; i++) { - taosArrayDestroy(pReqs[i].data); + taosArrayDestroyP(pReqs[i].data, taosMemoryFree); taosArrayDestroy(pReqs[i].dataLen); } taosMemoryFree(pReqs); diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 06ca26f0292df2447fa7c267a0d43e65f4117964..102bad742652005df440b5d4d7a87bcef34ba636 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -140,7 +140,6 @@ int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch) return 0; } -// TODO: handle version int32_t streamExecForAll(SStreamTask* pTask) { while (1) { int32_t batchCnt = 1; diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index b74e8386280a9a3a90da87dd12d37806a588cfa5..1442ed2e0509e37d8b21806dc05343adcaa0f32c 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -14,7 +14,8 @@ */ #include "executor.h" -#include "tstream.h" +#include "streamInc.h" +#include "ttimer.h" SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc) { SStreamMeta* pMeta = taosMemoryCalloc(1, sizeof(SStreamMeta)); @@ -22,17 +23,23 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - pMeta->path = strdup(path); + int32_t len = strlen(path) + 20; + char* streamPath = taosMemoryCalloc(1, len); + sprintf(streamPath, "%s/%s", path, "stream"); + pMeta->path = strdup(streamPath); if (tdbOpen(pMeta->path, 16 * 1024, 1, &pMeta->db) < 0) { goto _err; } + sprintf(streamPath, "%s/%s", pMeta->path, "checkpoints"); + mkdir(streamPath, 0755); + taosMemoryFree(streamPath); + if (tdbTbOpen("task.db", sizeof(int32_t), -1, NULL, pMeta->db, &pMeta->pTaskDb) < 0) { goto _err; } - // open state storage backend - if (tdbTbOpen("state.db", sizeof(int32_t), -1, NULL, pMeta->db, &pMeta->pStateDb) < 0) { + if (tdbTbOpen("checkpoint.db", sizeof(int32_t), -1, NULL, pMeta->db, &pMeta->pCheckpointDb) < 0) { goto _err; } @@ -48,16 +55,13 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF pMeta->ahandle = ahandle; pMeta->expandFunc = expandFunc; - if (streamLoadTasks(pMeta) < 0) { - goto _err; - } return pMeta; _err: if (pMeta->path) taosMemoryFree(pMeta->path); if (pMeta->pTasks) taosHashCleanup(pMeta->pTasks); - if (pMeta->pStateDb) tdbTbClose(pMeta->pStateDb); if (pMeta->pTaskDb) tdbTbClose(pMeta->pTaskDb); + if (pMeta->pCheckpointDb) tdbTbClose(pMeta->pCheckpointDb); if (pMeta->db) tdbClose(pMeta->db); taosMemoryFree(pMeta); return NULL; @@ -66,7 +70,7 @@ _err: void streamMetaClose(SStreamMeta* pMeta) { tdbCommit(pMeta->db, &pMeta->txn); tdbTbClose(pMeta->pTaskDb); - tdbTbClose(pMeta->pStateDb); + tdbTbClose(pMeta->pCheckpointDb); tdbClose(pMeta->db); void* pIter = NULL; @@ -81,7 +85,7 @@ void streamMetaClose(SStreamMeta* pMeta) { taosMemoryFree(pMeta); } -int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, char* msg, int32_t msgLen) { +int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* msg, int32_t msgLen) { SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask)); if (pTask == NULL) { return -1; @@ -99,16 +103,19 @@ int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, char* msg, int32_t msgLe goto FAIL; } - taosHashPut(pMeta->pTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*)); + if (taosHashPut(pMeta->pTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*)) < 0) { + goto FAIL; + } if (tdbTbUpsert(pMeta->pTaskDb, &pTask->taskId, sizeof(int32_t), msg, msgLen, &pMeta->txn) < 0) { + taosHashRemove(pMeta->pTasks, &pTask->taskId, sizeof(int32_t)); ASSERT(0); - return -1; + goto FAIL; } return 0; FAIL: - if (pTask) taosMemoryFree(pTask); + if (pTask) tFreeSStreamTask(pTask); return -1; } @@ -158,11 +165,28 @@ int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId) { SStreamTask* pTask = *ppTask; taosHashRemove(pMeta->pTasks, &taskId, sizeof(int32_t)); atomic_store_8(&pTask->taskStatus, TASK_STATUS__DROPPING); - } - if (tdbTbDelete(pMeta->pTaskDb, &taskId, sizeof(int32_t), &pMeta->txn) < 0) { - /*return -1;*/ + if (tdbTbDelete(pMeta->pTaskDb, &taskId, sizeof(int32_t), &pMeta->txn) < 0) { + /*return -1;*/ + } + + if (pTask->triggerParam != 0) { + taosTmrStop(pTask->timer); + } + + while (1) { + int8_t schedStatus = + atomic_val_compare_exchange_8(&pTask->schedStatus, TASK_SCHED_STATUS__INACTIVE, TASK_SCHED_STATUS__DROPPING); + if (schedStatus == TASK_SCHED_STATUS__INACTIVE) { + tFreeSStreamTask(pTask); + break; + } else if (schedStatus == TASK_SCHED_STATUS__DROPPING) { + break; + } + taosMsleep(10); + } } + return 0; } @@ -241,6 +265,8 @@ int32_t streamLoadTasks(SStreamMeta* pMeta) { } } + tdbFree(pKey); + tdbFree(pVal); if (tdbTbcClose(pCur) < 0) { return -1; } diff --git a/source/libs/stream/src/streamQueue.c b/source/libs/stream/src/streamQueue.c index 6819e5329fdcbac011ae258e89e2f665f85ebbca..ac10c8258744f178bd2010543176f545b40c88b6 100644 --- a/source/libs/stream/src/streamQueue.c +++ b/source/libs/stream/src/streamQueue.c @@ -13,7 +13,7 @@ * along with this program. If not, see . */ -#include "tstream.h" +#include "streamInc.h" SStreamQueue* streamQueueOpen() { SStreamQueue* pQueue = taosMemoryCalloc(1, sizeof(SStreamQueue)); @@ -36,9 +36,12 @@ void streamQueueClose(SStreamQueue* queue) { while (1) { void* qItem = streamQueueNextItem(queue); if (qItem) { - taosFreeQitem(qItem); + streamFreeQitem(qItem); } else { - return; + break; } } + taosFreeQall(queue->qall); + taosCloseQueue(queue->queue); + taosMemoryFree(queue); } diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 263053778b1ae94de5a5353edf158e37604baf98..0505c3edd6dd8211792679b7164bcc001bde6c4e 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -176,6 +176,7 @@ int32_t tDecodeSStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamRecoverDownstrea } int32_t streamSaveStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) { +#if 0 void* buf = NULL; ASSERT(pTask->taskLevel == TASK_LEVEL__SINK); @@ -224,10 +225,12 @@ int32_t streamSaveStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) { FAIL: if (buf) taosMemoryFree(buf); return -1; +#endif return 0; } int32_t streamLoadStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) { +#if 0 void* pVal = NULL; int32_t vLen = 0; if (tdbTbGet(pMeta->pStateDb, &pTask->taskId, sizeof(void*), &pVal, &vLen) < 0) { @@ -241,7 +244,7 @@ int32_t streamLoadStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) { pTask->nextCheckId = aggCheckpoint.checkpointId + 1; pTask->checkpointInfo = aggCheckpoint.checkpointVer; - +#endif return 0; } diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c new file mode 100644 index 0000000000000000000000000000000000000000..dfd6f012cc4f64d252f75a20f761c6f87fc05b78 --- /dev/null +++ b/source/libs/stream/src/streamState.c @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "executor.h" +#include "streamInc.h" +#include "tcommon.h" +#include "ttimer.h" + +SStreamState* streamStateOpen(char* path, SStreamTask* pTask) { + SStreamState* pState = taosMemoryCalloc(1, sizeof(SStreamState)); + if (pState == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + char statePath[300]; + sprintf(statePath, "%s/%d", path, pTask->taskId); + if (tdbOpen(statePath, 4096, 256, &pState->db) < 0) { + goto _err; + } + + // open state storage backend + if (tdbTbOpen("state.db", sizeof(SWinKey), -1, SWinKeyCmpr, pState->db, &pState->pStateDb) < 0) { + goto _err; + } + + if (streamStateBegin(pState) < 0) { + goto _err; + } + + pState->pOwner = pTask; + + return pState; + +_err: + if (pState->pStateDb) tdbTbClose(pState->pStateDb); + if (pState->db) tdbClose(pState->db); + taosMemoryFree(pState); + return NULL; +} + +void streamStateClose(SStreamState* pState) { + tdbCommit(pState->db, &pState->txn); + tdbTbClose(pState->pStateDb); + tdbClose(pState->db); + + taosMemoryFree(pState); +} + +int32_t streamStateBegin(SStreamState* pState) { + if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < + 0) { + return -1; + } + + if (tdbBegin(pState->db, &pState->txn) < 0) { + tdbTxnClose(&pState->txn); + return -1; + } + return 0; +} + +int32_t streamStateCommit(SStreamState* pState) { + if (tdbCommit(pState->db, &pState->txn) < 0) { + return -1; + } + memset(&pState->txn, 0, sizeof(TXN)); + if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < + 0) { + return -1; + } + if (tdbBegin(pState->db, &pState->txn) < 0) { + return -1; + } + return 0; +} + +int32_t streamStateAbort(SStreamState* pState) { + if (tdbAbort(pState->db, &pState->txn) < 0) { + return -1; + } + memset(&pState->txn, 0, sizeof(TXN)); + if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < + 0) { + return -1; + } + if (tdbBegin(pState->db, &pState->txn) < 0) { + return -1; + } + return 0; +} + +int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) { + return tdbTbUpsert(pState->pStateDb, key, sizeof(SWinKey), value, vLen, &pState->txn); +} +int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) { + return tdbTbGet(pState->pStateDb, key, sizeof(SWinKey), pVal, pVLen); +} + +int32_t streamStateDel(SStreamState* pState, const SWinKey* key) { + return tdbTbDelete(pState->pStateDb, key, sizeof(SWinKey), &pState->txn); +} + +SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key) { + SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); + if (pCur == NULL) return NULL; + tdbTbcOpen(pState->pStateDb, &pCur->pCur, NULL); + + int32_t c; + tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c); + if (c != 0) { + taosMemoryFree(pCur); + return NULL; + } + return pCur; +} + +int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) { + const SWinKey* pKTmp = NULL; + int32_t kLen; + if (tdbTbcGet(pCur->pCur, (const void**)&pKTmp, &kLen, pVal, pVLen) < 0) { + return -1; + } + *pKey = *pKTmp; + return 0; +} + +int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur) { + // + return tdbTbcMoveToFirst(pCur->pCur); +} + +int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur) { + // + return tdbTbcMoveToLast(pCur->pCur); +} + +SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key) { + SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); + if (pCur == NULL) { + return NULL; + } + + int32_t c; + if (tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c) < 0) { + taosMemoryFree(pCur); + return NULL; + } + if (c > 0) return pCur; + + if (tdbTbcMoveToNext(pCur->pCur) < 0) { + taosMemoryFree(pCur); + return NULL; + } + + return pCur; +} + +SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key) { + SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); + if (pCur == NULL) { + return NULL; + } + + int32_t c; + if (tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c) < 0) { + taosMemoryFree(pCur); + return NULL; + } + if (c < 0) return pCur; + + if (tdbTbcMoveToPrev(pCur->pCur) < 0) { + taosMemoryFree(pCur); + return NULL; + } + + return pCur; +} + +int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur) { + // + return tdbTbcMoveToNext(pCur->pCur); +} + +int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur) { + // + return tdbTbcMoveToPrev(pCur->pCur); +} +void streamStateFreeCur(SStreamStateCur* pCur) { + tdbTbcClose(pCur->pCur); + taosMemoryFree(pCur); +} + +void streamFreeVal(void* val) { tdbFree(val); } diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index 638d39e5cc386ccd5922936dc9606f9b3aa2d8cd..ce5917de296c317f739e79cb78cda21660769aa8 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -152,9 +152,21 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask) { } void tFreeSStreamTask(SStreamTask* pTask) { - streamQueueClose(pTask->inputQueue); - streamQueueClose(pTask->outputQueue); + qDebug("free stream task %d", pTask->taskId); + if (pTask->inputQueue) streamQueueClose(pTask->inputQueue); + if (pTask->outputQueue) streamQueueClose(pTask->outputQueue); if (pTask->exec.qmsg) taosMemoryFree(pTask->exec.qmsg); if (pTask->exec.executor) qDestroyTask(pTask->exec.executor); + taosArrayDestroyP(pTask->childEpInfo, taosMemoryFree); + if (pTask->outputType == TASK_OUTPUT__TABLE) { + tDeleteSSchemaWrapper(pTask->tbSink.pSchemaWrapper); + taosMemoryFree(pTask->tbSink.pTSchema); + } + if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) { + taosArrayDestroy(pTask->shuffleDispatcher.dbInfo.pVgroupInfos); + } + + if (pTask->pState) streamStateClose(pTask->pState); + taosMemoryFree(pTask); } diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c index 0b1ce27b7702e23066ff27fdd0c78e9d7dc8cd6e..d053662bd30287d5d9589a3881c8588fd3eb82ec 100644 --- a/source/libs/stream/src/streamUpdate.c +++ b/source/libs/stream/src/streamUpdate.c @@ -13,33 +13,31 @@ * along with this program. If not, see . */ -#include "tstreamUpdate.h" +#include "query.h" #include "tencode.h" +#include "tstreamUpdate.h" #include "ttime.h" -#include "query.h" -#define DEFAULT_FALSE_POSITIVE 0.01 -#define DEFAULT_BUCKET_SIZE 1310720 -#define DEFAULT_MAP_CAPACITY 1310720 -#define DEFAULT_MAP_SIZE (DEFAULT_MAP_CAPACITY * 10) -#define ROWS_PER_MILLISECOND 1 -#define MAX_NUM_SCALABLE_BF 100000 -#define MIN_NUM_SCALABLE_BF 10 -#define DEFAULT_PREADD_BUCKET 1 -#define MAX_INTERVAL MILLISECOND_PER_MINUTE -#define MIN_INTERVAL (MILLISECOND_PER_SECOND * 10) -#define DEFAULT_EXPECTED_ENTRIES 10000 - -static int64_t adjustExpEntries(int64_t entries) { - return TMIN(DEFAULT_EXPECTED_ENTRIES, entries); -} +#define DEFAULT_FALSE_POSITIVE 0.01 +#define DEFAULT_BUCKET_SIZE 1310720 +#define DEFAULT_MAP_CAPACITY 1310720 +#define DEFAULT_MAP_SIZE (DEFAULT_MAP_CAPACITY * 10) +#define ROWS_PER_MILLISECOND 1 +#define MAX_NUM_SCALABLE_BF 100000 +#define MIN_NUM_SCALABLE_BF 10 +#define DEFAULT_PREADD_BUCKET 1 +#define MAX_INTERVAL MILLISECOND_PER_MINUTE +#define MIN_INTERVAL (MILLISECOND_PER_SECOND * 10) +#define DEFAULT_EXPECTED_ENTRIES 10000 + +static int64_t adjustExpEntries(int64_t entries) { return TMIN(DEFAULT_EXPECTED_ENTRIES, entries); } static void windowSBfAdd(SUpdateInfo *pInfo, uint64_t count) { if (pInfo->numSBFs < count) { count = pInfo->numSBFs; } for (uint64_t i = 0; i < count; ++i) { - int64_t rows = adjustExpEntries(pInfo->interval * ROWS_PER_MILLISECOND); + int64_t rows = adjustExpEntries(pInfo->interval * ROWS_PER_MILLISECOND); SScalableBf *tsSBF = tScalableBfInit(rows, DEFAULT_FALSE_POSITIVE); taosArrayPush(pInfo->pTsSBFs, &tsSBF); } @@ -78,7 +76,7 @@ static int64_t adjustInterval(int64_t interval, int32_t precision) { static int64_t adjustWatermark(int64_t adjInterval, int64_t originInt, int64_t watermark) { if (watermark <= adjInterval) { - watermark = TMAX(originInt/adjInterval, 1) * adjInterval; + watermark = TMAX(originInt / adjInterval, 1) * adjInterval; } else if (watermark > MAX_NUM_SCALABLE_BF * adjInterval) { watermark = MAX_NUM_SCALABLE_BF * adjInterval; }/* else if (watermark < MIN_NUM_SCALABLE_BF * adjInterval) { @@ -158,11 +156,17 @@ static SScalableBf *getSBf(SUpdateInfo *pInfo, TSKEY ts) { return res; } +bool updateInfoIsTableInserted(SUpdateInfo *pInfo, int64_t tbUid) { + void *pVal = taosHashGet(pInfo->pMap, &tbUid, sizeof(int64_t)); + if (pVal || taosHashGetSize(pInfo->pMap) >= DEFAULT_MAP_SIZE) return true; + return false; +} + bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) { - int32_t res = TSDB_CODE_FAILED; - TSKEY* pMapMaxTs = taosHashGet(pInfo->pMap, &tableId, sizeof(uint64_t)); - uint64_t index = ((uint64_t)tableId) % pInfo->numBuckets; - TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index); + int32_t res = TSDB_CODE_FAILED; + TSKEY *pMapMaxTs = taosHashGet(pInfo->pMap, &tableId, sizeof(uint64_t)); + uint64_t index = ((uint64_t)tableId) % pInfo->numBuckets; + TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index); if (ts < maxTs - pInfo->watermark) { // this window has been closed. if (pInfo->pCloseWinSBF) { @@ -178,42 +182,47 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) { } int32_t size = taosHashGetSize(pInfo->pMap); - if ( (!pMapMaxTs && size < DEFAULT_MAP_SIZE) || (pMapMaxTs && *pMapMaxTs < ts)) { + if ((!pMapMaxTs && size < DEFAULT_MAP_SIZE) || (pMapMaxTs && *pMapMaxTs < ts)) { taosHashPut(pInfo->pMap, &tableId, sizeof(uint64_t), &ts, sizeof(TSKEY)); return false; } - if ( !pMapMaxTs && maxTs < ts ) { + if (!pMapMaxTs && maxTs < ts) { taosArraySet(pInfo->pTsBuckets, index, &ts); return false; } if (ts < pInfo->minTS) { - qDebug("===stream===Update. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64 , tableId, maxTs, *pMapMaxTs, ts); + qDebug("===stream===Update. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId, + maxTs, *pMapMaxTs, ts); return true; } else if (res == TSDB_CODE_SUCCESS) { return false; } - qDebug("===stream===Update. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64 , tableId, maxTs, *pMapMaxTs, ts); + qDebug("===stream===Update. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId, + maxTs, *pMapMaxTs, ts); // check from tsdb api return true; } -void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, uint64_t version) { - qDebug("===stream===groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64 , groupId, pWin->skey, pWin->ekey, version); +void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version) { + qDebug("===stream===groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64, groupId, + pWin->skey, pWin->ekey, version); pInfo->scanWindow = *pWin; pInfo->scanGroupId = groupId; pInfo->maxVersion = version; } -bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, uint64_t version) { +bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version) { if (!pInfo) { return false; } - qDebug("===stream===check groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64 , groupId, pWin->skey, pWin->ekey, version); - if (pInfo->scanGroupId == groupId && pInfo->scanWindow.skey <= pWin->skey && - pWin->ekey <= pInfo->scanWindow.ekey && version <= pInfo->maxVersion ) { - qDebug("===stream===ignore groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64 , groupId, pWin->skey, pWin->ekey, version); + qDebug("===stream===check groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64, groupId, + pWin->skey, pWin->ekey, version); + if (pInfo->scanGroupId == groupId && pInfo->scanWindow.skey <= pWin->skey && pWin->ekey <= pInfo->scanWindow.ekey && + version <= pInfo->maxVersion) { + qDebug("===stream===ignore groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64, groupId, + pWin->skey, pWin->ekey, version); return true; } return false; @@ -261,7 +270,7 @@ int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo) int32_t size = taosArrayGetSize(pInfo->pTsBuckets); if (tEncodeI32(&encoder, size) < 0) return -1; for (int32_t i = 0; i < size; i++) { - TSKEY* pTs = (TSKEY*)taosArrayGet(pInfo->pTsBuckets, i); + TSKEY *pTs = (TSKEY *)taosArrayGet(pInfo->pTsBuckets, i); if (tEncodeI64(&encoder, *pTs) < 0) return -1; } @@ -270,7 +279,7 @@ int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo) int32_t sBfSize = taosArrayGetSize(pInfo->pTsSBFs); if (tEncodeI32(&encoder, sBfSize) < 0) return -1; for (int32_t i = 0; i < sBfSize; i++) { - SScalableBf* pSBf = taosArrayGetP(pInfo->pTsSBFs, i); + SScalableBf *pSBf = taosArrayGetP(pInfo->pTsSBFs, i); if (tScalableBfEncode(pSBf, &encoder) < 0) return -1; } @@ -278,17 +287,17 @@ int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo) if (tEncodeI64(&encoder, pInfo->interval) < 0) return -1; if (tEncodeI64(&encoder, pInfo->watermark) < 0) return -1; if (tEncodeI64(&encoder, pInfo->minTS) < 0) return -1; - + if (tScalableBfEncode(pInfo->pCloseWinSBF, &encoder) < 0) return -1; int32_t mapSize = taosHashGetSize(pInfo->pMap); if (tEncodeI32(&encoder, mapSize) < 0) return -1; - void* pIte = NULL; + void *pIte = NULL; size_t keyLen = 0; while ((pIte = taosHashIterate(pInfo->pMap, pIte)) != NULL) { - void* key = taosHashGetKey(pIte, &keyLen); - if (tEncodeU64(&encoder, *(uint64_t*)key) < 0) return -1; - if (tEncodeI64(&encoder, *(TSKEY*)pIte) < 0) return -1; + void *key = taosHashGetKey(pIte, &keyLen); + if (tEncodeU64(&encoder, *(uint64_t *)key) < 0) return -1; + if (tEncodeI64(&encoder, *(TSKEY *)pIte) < 0) return -1; } if (tEncodeI64(&encoder, pInfo->scanWindow.skey) < 0) return -1; @@ -311,7 +320,7 @@ int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo) { int32_t size = 0; if (tDecodeI32(&decoder, &size) < 0) return -1; - pInfo->pTsBuckets = taosArrayInit(size, sizeof(TSKEY)); + pInfo->pTsBuckets = taosArrayInit(size, sizeof(TSKEY)); TSKEY ts = INT64_MIN; for (int32_t i = 0; i < size; i++) { if (tDecodeI64(&decoder, &ts) < 0) return -1; @@ -324,7 +333,7 @@ int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo) { if (tDecodeI32(&decoder, &sBfSize) < 0) return -1; pInfo->pTsSBFs = taosArrayInit(sBfSize, sizeof(void *)); for (int32_t i = 0; i < sBfSize; i++) { - SScalableBf* pSBf = tScalableBfDecode(&decoder); + SScalableBf *pSBf = tScalableBfDecode(&decoder); if (!pSBf) return -1; taosArrayPush(pInfo->pTsSBFs, &pSBf); } @@ -337,11 +346,11 @@ int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo) { int32_t mapSize = 0; if (tDecodeI32(&decoder, &mapSize) < 0) return -1; - _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT); pInfo->pMap = taosHashInit(mapSize, hashFn, true, HASH_NO_LOCK); uint64_t uid = 0; ts = INT64_MIN; - for(int32_t i = 0; i < mapSize; i++) { + for (int32_t i = 0; i < mapSize; i++) { if (tDecodeU64(&decoder, &uid) < 0) return -1; if (tDecodeI64(&decoder, &ts) < 0) return -1; taosHashPut(pInfo->pMap, &uid, sizeof(uint64_t), &ts, sizeof(TSKEY)); diff --git a/source/libs/sync/inc/syncIndexMgr.h b/source/libs/sync/inc/syncIndexMgr.h index 1f60a9d57e75c86bce5cd57d5a04540681c9df3c..fb85b89419a8b09a4e60d174d51791acf5a5b6ae 100644 --- a/source/libs/sync/inc/syncIndexMgr.h +++ b/source/libs/sync/inc/syncIndexMgr.h @@ -29,8 +29,12 @@ extern "C" { // SIndexMgr ----------------------------- typedef struct SSyncIndexMgr { SRaftId (*replicas)[TSDB_MAX_REPLICA]; - SyncIndex index[TSDB_MAX_REPLICA]; - SyncTerm privateTerm[TSDB_MAX_REPLICA]; // for advanced function + SyncIndex index[TSDB_MAX_REPLICA]; + SyncTerm privateTerm[TSDB_MAX_REPLICA]; // for advanced function + + int64_t startTimeArr[TSDB_MAX_REPLICA]; + int64_t recvTimeArr[TSDB_MAX_REPLICA]; + int32_t replicaNum; SSyncNode *pSyncNode; } SSyncIndexMgr; @@ -41,8 +45,13 @@ void syncIndexMgrDestroy(SSyncIndexMgr *pSyncIndexMgr); void syncIndexMgrClear(SSyncIndexMgr *pSyncIndexMgr); void syncIndexMgrSetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, SyncIndex index); SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId); -cJSON * syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr); -char * syncIndexMgr2Str(SSyncIndexMgr *pSyncIndexMgr); +cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr); +char *syncIndexMgr2Str(SSyncIndexMgr *pSyncIndexMgr); + +void syncIndexMgrSetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t startTime); +int64_t syncIndexMgrGetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId); +void syncIndexMgrSetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t recvTime); +int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId); // void syncIndexMgrSetTerm(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, SyncTerm term); // SyncTerm syncIndexMgrGetTerm(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId); diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index 3e247e5d795f99a11013fa045f1a0585cc6dc168..0afc373f2d0d90333002245e43817d12ef02df8c 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -237,7 +237,7 @@ void syncNodeVoteForSelf(SSyncNode* pSyncNode); bool syncNodeHasSnapshot(SSyncNode* pSyncNode); void syncNodeMaybeUpdateCommitBySnapshot(SSyncNode* pSyncNode); -SyncIndex syncNodeGetLastIndex(SSyncNode* pSyncNode); +SyncIndex syncNodeGetLastIndex(const SSyncNode* pSyncNode); SyncTerm syncNodeGetLastTerm(SSyncNode* pSyncNode); int32_t syncNodeGetLastIndexTerm(SSyncNode* pSyncNode, SyncIndex* pLastIndex, SyncTerm* pLastTerm); @@ -269,6 +269,8 @@ int32_t syncNodeLeaderTransfer(SSyncNode* pSyncNode); int32_t syncNodeLeaderTransferTo(SSyncNode* pSyncNode, SNodeInfo newLeader); int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* pEntry); +int32_t syncNodeDynamicQuorum(const SSyncNode* pSyncNode); + // trace log void syncLogSendRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, const char* s); void syncLogRecvRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, const char* s); diff --git a/source/libs/sync/inc/syncReplication.h b/source/libs/sync/inc/syncReplication.h index 21821be6c73db67e52ee0ad2c8de33c6bdc39f49..edce124ee52fefb07d52ae7fc27038c0b14f9b35 100644 --- a/source/libs/sync/inc/syncReplication.h +++ b/source/libs/sync/inc/syncReplication.h @@ -55,6 +55,8 @@ int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode); int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode); int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode); +int32_t syncNodeAppendEntriesOnePeer(SSyncNode* pSyncNode, SRaftId* pDestId, SyncIndex nextIndex); + int32_t syncNodeReplicate(SSyncNode* pSyncNode, bool isTimer); int32_t syncNodeAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntries* pMsg); int32_t syncNodeAppendEntriesBatch(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntriesBatch* pMsg); diff --git a/source/libs/sync/inc/syncSnapshot.h b/source/libs/sync/inc/syncSnapshot.h index 0dc67cf15077cee2558b582b33b358583b9e8aab..6fb558e45c66e13646c26b20e20bf60a7ded1d26 100644 --- a/source/libs/sync/inc/syncSnapshot.h +++ b/source/libs/sync/inc/syncSnapshot.h @@ -28,10 +28,10 @@ extern "C" { #include "syncMessage.h" #include "taosdef.h" -#define SYNC_SNAPSHOT_SEQ_INVALID -1 +#define SYNC_SNAPSHOT_SEQ_INVALID -1 #define SYNC_SNAPSHOT_SEQ_FORCE_CLOSE -2 -#define SYNC_SNAPSHOT_SEQ_BEGIN 0 -#define SYNC_SNAPSHOT_SEQ_END 0x7FFFFFFF +#define SYNC_SNAPSHOT_SEQ_BEGIN 0 +#define SYNC_SNAPSHOT_SEQ_END 0x7FFFFFFF #define SYNC_SNAPSHOT_RETRY_MS 5000 @@ -40,14 +40,14 @@ typedef struct SSyncSnapshotSender { bool start; int32_t seq; int32_t ack; - void * pReader; - void * pCurrentBlock; + void *pReader; + void *pCurrentBlock; int32_t blockLen; SSnapshotParam snapshotParam; SSnapshot snapshot; SSyncCfg lastConfig; int64_t sendingMS; - SSyncNode * pSyncNode; + SSyncNode *pSyncNode; int32_t replicaIndex; SyncTerm term; SyncTerm privateTerm; @@ -64,20 +64,20 @@ int32_t snapshotSend(SSyncSnapshotSender *pSender); int32_t snapshotReSend(SSyncSnapshotSender *pSender); cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender); -char * snapshotSender2Str(SSyncSnapshotSender *pSender); -char * snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event); +char *snapshotSender2Str(SSyncSnapshotSender *pSender); +char *snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event); //--------------------------------------------------- typedef struct SSyncSnapshotReceiver { bool start; int32_t ack; - void * pWriter; + void *pWriter; SyncTerm term; SyncTerm privateTerm; SSnapshotParam snapshotParam; SSnapshot snapshot; SRaftId fromId; - SSyncNode * pSyncNode; + SSyncNode *pSyncNode; } SSyncSnapshotReceiver; @@ -86,10 +86,11 @@ void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver) int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg); int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver); bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver); +void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver); cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver); -char * snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver); -char * snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event); +char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver); +char *snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event); //--------------------------------------------------- // on message diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index 4f93d8197dc801ae86619858b15d9055059565eb..e000ba8bf869454a0276411d7312190be98348b9 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -148,6 +148,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { pReply->term = ths->pRaftStore->currentTerm; pReply->success = false; pReply->matchIndex = SYNC_INDEX_INVALID; + pReply->startTime = ths->startTime; // msg event log syncLogSendAppendEntriesReply(ths, pReply, ""); @@ -290,6 +291,8 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { pReply->matchIndex = pMsg->prevLogIndex; } + pReply->startTime = ths->startTime; + // msg event log syncLogSendAppendEntriesReply(ths, pReply, ""); @@ -603,6 +606,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc pReply->privateTerm = ths->pNewNodeReceiver->privateTerm; pReply->success = true; pReply->matchIndex = matchIndex; + pReply->startTime = ths->startTime; // msg event log syncLogSendAppendEntriesReply(ths, pReply, ""); @@ -651,6 +655,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc pReply->privateTerm = ths->pNewNodeReceiver->privateTerm; pReply->success = false; pReply->matchIndex = ths->commitIndex; + pReply->startTime = ths->startTime; // msg event log syncLogSendAppendEntriesReply(ths, pReply, ""); @@ -729,6 +734,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc pReply->privateTerm = ths->pNewNodeReceiver->privateTerm; pReply->success = true; pReply->matchIndex = hasAppendEntries ? pMsg->prevLogIndex + pMsg->dataCount : pMsg->prevLogIndex; + pReply->startTime = ths->startTime; // msg event log syncLogSendAppendEntriesReply(ths, pReply, ""); @@ -874,6 +880,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs pReply->privateTerm = ths->pNewNodeReceiver->privateTerm; pReply->success = true; pReply->matchIndex = matchIndex; + pReply->startTime = ths->startTime; // msg event log syncLogSendAppendEntriesReply(ths, pReply, ""); @@ -919,6 +926,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs pReply->privateTerm = ths->pNewNodeReceiver->privateTerm; pReply->success = false; pReply->matchIndex = SYNC_INDEX_INVALID; + pReply->startTime = ths->startTime; // msg event log syncLogSendAppendEntriesReply(ths, pReply, ""); @@ -984,6 +992,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs pReply->privateTerm = ths->pNewNodeReceiver->privateTerm; pReply->success = true; pReply->matchIndex = hasAppendEntries ? pMsg->prevLogIndex + 1 : pMsg->prevLogIndex; + pReply->startTime = ths->startTime; // msg event log syncLogSendAppendEntriesReply(ths, pReply, ""); diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c index 4928c54bd72acc079f2d4e4c42aac49d1ce33e82..9253ed0129751c2e0d0538602dc124b38a72447d 100644 --- a/source/libs/sync/src/syncAppendEntriesReply.c +++ b/source/libs/sync/src/syncAppendEntriesReply.c @@ -64,6 +64,10 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p ASSERT(pMsg->term == ths->pRaftStore->currentTerm); + // update time + syncIndexMgrSetStartTime(ths->pNextIndex, &(pMsg->srcId), pMsg->startTime); + syncIndexMgrSetRecvTime(ths->pNextIndex, &(pMsg->srcId), taosGetTimestampMs()); + SyncIndex beforeNextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId)); SyncIndex beforeMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId)); @@ -170,6 +174,10 @@ int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntrie ASSERT(pMsg->term == ths->pRaftStore->currentTerm); + // update time + syncIndexMgrSetStartTime(ths->pNextIndex, &(pMsg->srcId), pMsg->startTime); + syncIndexMgrSetRecvTime(ths->pNextIndex, &(pMsg->srcId), taosGetTimestampMs()); + SyncIndex beforeNextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId)); SyncIndex beforeMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId)); @@ -330,6 +338,10 @@ int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntries ASSERT(pMsg->term == ths->pRaftStore->currentTerm); + // update time + syncIndexMgrSetStartTime(ths->pNextIndex, &(pMsg->srcId), pMsg->startTime); + syncIndexMgrSetRecvTime(ths->pNextIndex, &(pMsg->srcId), taosGetTimestampMs()); + SyncIndex beforeNextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId)); SyncIndex beforeMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId)); diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c index 3a94ed9713ba2b12e2ce766b3dfd9e615b309d9f..1e68fe346c71d65e0594643da5b94e2dd1ab204d 100644 --- a/source/libs/sync/src/syncCommit.c +++ b/source/libs/sync/src/syncCommit.c @@ -133,6 +133,87 @@ bool syncAgreeIndex(SSyncNode* pSyncNode, SRaftId* pRaftId, SyncIndex index) { return false; } +static inline int64_t syncNodeAbs64(int64_t a, int64_t b) { + ASSERT(a >= 0); + ASSERT(b >= 0); + + int64_t c = a > b ? a - b : b - a; + return c; +} + +int32_t syncNodeDynamicQuorum(const SSyncNode* pSyncNode) { + int32_t quorum = 1; // self + + int64_t timeNow = taosGetTimestampMs(); + for (int i = 0; i < pSyncNode->peersNum; ++i) { + int64_t peerStartTime = syncIndexMgrGetStartTime(pSyncNode->pNextIndex, &(pSyncNode->peersId)[i]); + int64_t peerRecvTime = syncIndexMgrGetRecvTime(pSyncNode->pNextIndex, &(pSyncNode->peersId)[i]); + SyncIndex peerMatchIndex = syncIndexMgrGetIndex(pSyncNode->pMatchIndex, &(pSyncNode->peersId)[i]); + + int64_t recvTimeDiff = TABS(peerRecvTime - timeNow); + int64_t startTimeDiff = TABS(peerStartTime - pSyncNode->startTime); + int64_t logDiff = TABS(peerMatchIndex - syncNodeGetLastIndex(pSyncNode)); + + /* + int64_t recvTimeDiff = syncNodeAbs64(peerRecvTime, timeNow); + int64_t startTimeDiff = syncNodeAbs64(peerStartTime, pSyncNode->startTime); + int64_t logDiff = syncNodeAbs64(peerMatchIndex, syncNodeGetLastIndex(pSyncNode)); + */ + + int32_t addQuorum = 0; + + if (recvTimeDiff < SYNC_MAX_RECV_TIME_RANGE_MS) { + if (startTimeDiff < SYNC_MAX_START_TIME_RANGE_MS) { + addQuorum = 1; + } else { + if (logDiff < SYNC_ADD_QUORUM_COUNT) { + addQuorum = 1; + } else { + addQuorum = 0; + } + } + } else { + addQuorum = 0; + } + + /* + if (recvTimeDiff < SYNC_MAX_RECV_TIME_RANGE_MS) { + addQuorum = 1; + } else { + addQuorum = 0; + } + + if (startTimeDiff > SYNC_MAX_START_TIME_RANGE_MS) { + addQuorum = 0; + } + */ + + quorum += addQuorum; + } + + ASSERT(quorum <= pSyncNode->replicaNum); + + if (quorum < pSyncNode->quorum) { + quorum = pSyncNode->quorum; + } + + return quorum; +} + +bool syncAgree(SSyncNode* pSyncNode, SyncIndex index) { + int agreeCount = 0; + for (int i = 0; i < pSyncNode->replicaNum; ++i) { + if (syncAgreeIndex(pSyncNode, &(pSyncNode->replicasId[i]), index)) { + ++agreeCount; + } + if (agreeCount >= syncNodeDynamicQuorum(pSyncNode)) { + return true; + } + } + return false; +} + +/* bool syncAgree(SSyncNode* pSyncNode, SyncIndex index) { int agreeCount = 0; for (int i = 0; i < pSyncNode->replicaNum; ++i) { @@ -145,3 +226,4 @@ bool syncAgree(SSyncNode* pSyncNode, SyncIndex index) { } return false; } +*/ diff --git a/source/libs/sync/src/syncIndexMgr.c b/source/libs/sync/src/syncIndexMgr.c index 8c820fcd9cabf951bd92a0af7fcc4940faa20ccc..07c4fa8429dc539609d3ae788caab3352b0a3e60 100644 --- a/source/libs/sync/src/syncIndexMgr.c +++ b/source/libs/sync/src/syncIndexMgr.c @@ -47,6 +47,13 @@ void syncIndexMgrDestroy(SSyncIndexMgr *pSyncIndexMgr) { void syncIndexMgrClear(SSyncIndexMgr *pSyncIndexMgr) { memset(pSyncIndexMgr->index, 0, sizeof(pSyncIndexMgr->index)); memset(pSyncIndexMgr->privateTerm, 0, sizeof(pSyncIndexMgr->privateTerm)); + + // int64_t timeNow = taosGetMonotonicMs(); + for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) { + pSyncIndexMgr->startTimeArr[i] = 0; + pSyncIndexMgr->recvTimeArr[i] = 0; + } + /* for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) { pSyncIndexMgr->index[i] = 0; @@ -68,7 +75,8 @@ void syncIndexMgrSetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, char host[128]; uint16_t port; syncUtilU642Addr(pRaftId->addr, host, sizeof(host), &port); - sError("vgId:%d, index mgr set for %s:%d, index:%" PRId64 " error", pSyncIndexMgr->pSyncNode->vgId, host, port, index); + sError("vgId:%d, index mgr set for %s:%d, index:%" PRId64 " error", pSyncIndexMgr->pSyncNode->vgId, host, port, + index); } SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId) { @@ -125,11 +133,65 @@ cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr) { char *syncIndexMgr2Str(SSyncIndexMgr *pSyncIndexMgr) { cJSON *pJson = syncIndexMgr2Json(pSyncIndexMgr); - char * serialized = cJSON_Print(pJson); + char *serialized = cJSON_Print(pJson); cJSON_Delete(pJson); return serialized; } +void syncIndexMgrSetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t startTime) { + for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) { + if (syncUtilSameId(&((*(pSyncIndexMgr->replicas))[i]), pRaftId)) { + (pSyncIndexMgr->startTimeArr)[i] = startTime; + return; + } + } + + // maybe config change + // ASSERT(0); + char host[128]; + uint16_t port; + syncUtilU642Addr(pRaftId->addr, host, sizeof(host), &port); + sError("vgId:%d, index mgr set for %s:%d, start-time:%" PRId64 " error", pSyncIndexMgr->pSyncNode->vgId, host, port, + startTime); +} + +int64_t syncIndexMgrGetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId) { + for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) { + if (syncUtilSameId(&((*(pSyncIndexMgr->replicas))[i]), pRaftId)) { + int64_t startTime = (pSyncIndexMgr->startTimeArr)[i]; + return startTime; + } + } + ASSERT(0); +} + +void syncIndexMgrSetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t recvTime) { + for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) { + if (syncUtilSameId(&((*(pSyncIndexMgr->replicas))[i]), pRaftId)) { + (pSyncIndexMgr->recvTimeArr)[i] = recvTime; + return; + } + } + + // maybe config change + // ASSERT(0); + char host[128]; + uint16_t port; + syncUtilU642Addr(pRaftId->addr, host, sizeof(host), &port); + sError("vgId:%d, index mgr set for %s:%d, recv-time:%" PRId64 " error", pSyncIndexMgr->pSyncNode->vgId, host, port, + recvTime); +} + +int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId) { + for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) { + if (syncUtilSameId(&((*(pSyncIndexMgr->replicas))[i]), pRaftId)) { + int64_t recvTime = (pSyncIndexMgr->recvTimeArr)[i]; + return recvTime; + } + } + ASSERT(0); +} + // for debug ------------------- void syncIndexMgrPrint(SSyncIndexMgr *pObj) { char *serialized = syncIndexMgr2Str(pObj); diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index c7784cd62e29cc22271de137204f69be8023581b..51098374b03531142c9c12443fa5b02efddc3aca 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -392,6 +392,29 @@ bool syncIsReady(int64_t rid) { return b; } +bool syncIsReadyForRead(int64_t rid) { + SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid); + if (pSyncNode == NULL) { + return false; + } + ASSERT(rid == pSyncNode->rid); + + // TODO: last not noop? + SyncIndex lastIndex = syncNodeGetLastIndex(pSyncNode); + bool b = (pSyncNode->state == TAOS_SYNC_STATE_LEADER) && (pSyncNode->commitIndex >= lastIndex - SYNC_MAX_READ_RANGE); + taosReleaseRef(tsNodeRefId, pSyncNode->rid); + + // if false, set error code + if (false == b) { + if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) { + terrno = TSDB_CODE_SYN_NOT_LEADER; + } else { + terrno = TSDB_CODE_APP_NOT_READY; + } + } + return b; +} + bool syncIsRestoreFinish(int64_t rid) { SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid); if (pSyncNode == NULL) { @@ -519,6 +542,30 @@ SyncTerm syncGetMyTerm(int64_t rid) { return term; } +SyncIndex syncGetLastIndex(int64_t rid) { + SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid); + if (pSyncNode == NULL) { + return SYNC_INDEX_INVALID; + } + ASSERT(rid == pSyncNode->rid); + SyncIndex lastIndex = syncNodeGetLastIndex(pSyncNode); + + taosReleaseRef(tsNodeRefId, pSyncNode->rid); + return lastIndex; +} + +SyncIndex syncGetCommitIndex(int64_t rid) { + SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid); + if (pSyncNode == NULL) { + return SYNC_INDEX_INVALID; + } + ASSERT(rid == pSyncNode->rid); + SyncIndex cmtIndex = pSyncNode->commitIndex; + + taosReleaseRef(tsNodeRefId, pSyncNode->rid); + return cmtIndex; +} + SyncGroupId syncGetVgId(int64_t rid) { SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid); if (pSyncNode == NULL) { @@ -828,6 +875,15 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak) { pSyncNode->changing = true; } + // not restored, vnode enable + if (!pSyncNode->restoreFinish && pSyncNode->vgId != 1) { + ret = -1; + terrno = TSDB_CODE_SYN_PROPOSE_NOT_READY; + sError("vgId:%d, failed to sync propose since not ready, type:%s, last:%ld, cmt:%ld", pSyncNode->vgId, + TMSG_INFO(pMsg->msgType), syncNodeGetLastIndex(pSyncNode), pSyncNode->commitIndex); + goto _END; + } + SRespStub stub; stub.createTime = taosGetTimestampMs(); stub.rpcMsg = *pMsg; @@ -1626,13 +1682,13 @@ inline void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) { ", sby:%d, " "stgy:%d, bch:%d, " "r-num:%d, " - "lcfg:%" PRId64 ", chging:%d, rsto:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s", + "lcfg:%" PRId64 ", chging:%d, rsto:%d, dquorum:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s", pSyncNode->vgId, syncUtilState2String(pSyncNode->state), str, pSyncNode->pRaftStore->currentTerm, pSyncNode->commitIndex, logBeginIndex, logLastIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, pSyncNode->pRaftCfg->isStandBy, pSyncNode->pRaftCfg->snapshotStrategy, pSyncNode->pRaftCfg->batchSize, pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex, pSyncNode->changing, - pSyncNode->restoreFinish, pSyncNode->electTimerLogicClockUser, pSyncNode->heartbeatTimerLogicClockUser, - printStr); + pSyncNode->restoreFinish, syncNodeDynamicQuorum(pSyncNode), pSyncNode->electTimerLogicClockUser, + pSyncNode->heartbeatTimerLogicClockUser, printStr); } else { snprintf(logBuf, sizeof(logBuf), "%s", str); } @@ -1650,12 +1706,13 @@ inline void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) { ", sby:%d, " "stgy:%d, bch:%d, " "r-num:%d, " - "lcfg:%" PRId64 ", chging:%d, rsto:%d, %s", + "lcfg:%" PRId64 ", chging:%d, rsto:%d, dquorum:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s", pSyncNode->vgId, syncUtilState2String(pSyncNode->state), str, pSyncNode->pRaftStore->currentTerm, pSyncNode->commitIndex, logBeginIndex, logLastIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, pSyncNode->pRaftCfg->isStandBy, pSyncNode->pRaftCfg->snapshotStrategy, pSyncNode->pRaftCfg->batchSize, pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex, pSyncNode->changing, - pSyncNode->restoreFinish, printStr); + pSyncNode->restoreFinish, syncNodeDynamicQuorum(pSyncNode), pSyncNode->electTimerLogicClockUser, + pSyncNode->heartbeatTimerLogicClockUser, printStr); } else { snprintf(s, len, "%s", str); } @@ -2124,6 +2181,11 @@ void syncNodeBecomeLeader(SSyncNode* pSyncNode, const char* debugStr) { (pMySender->privateTerm) += 100; } + // close receiver + if (snapshotReceiverIsStart(pSyncNode->pNewNodeReceiver)) { + snapshotReceiverForceStop(pSyncNode->pNewNodeReceiver); + } + // stop elect timer syncNodeStopElectTimer(pSyncNode); @@ -2227,7 +2289,7 @@ bool syncNodeHasSnapshot(SSyncNode* pSyncNode) { // return max(logLastIndex, snapshotLastIndex) // if no snapshot and log, return -1 -SyncIndex syncNodeGetLastIndex(SSyncNode* pSyncNode) { +SyncIndex syncNodeGetLastIndex(const SSyncNode* pSyncNode) { SSnapshot snapshot = {.data = NULL, .lastApplyIndex = -1, .lastApplyTerm = 0, .lastConfigIndex = -1}; if (pSyncNode->pFsm->FpGetSnapshotInfo != NULL) { pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot); @@ -2716,11 +2778,27 @@ int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* p return 0; } - if (ths->vgId > 1) { - syncNodeEventLog(ths, "I am vnode, can not do leader transfer"); + if (pEntry->term < ths->pRaftStore->currentTerm) { + char logBuf[128]; + snprintf(logBuf, sizeof(logBuf), "little term:%lu, can not do leader transfer", pEntry->term); + syncNodeEventLog(ths, logBuf); + return 0; + } + + if (pEntry->index < syncNodeGetLastIndex(ths)) { + char logBuf[128]; + snprintf(logBuf, sizeof(logBuf), "little index:%ld, can not do leader transfer", pEntry->index); + syncNodeEventLog(ths, logBuf); return 0; } + /* + if (ths->vgId > 1) { + syncNodeEventLog(ths, "I am vnode, can not do leader transfer"); + return 0; + } + */ + do { char logBuf[128]; snprintf(logBuf, sizeof(logBuf), "do leader transfer, index:%ld", pEntry->index); diff --git a/source/libs/sync/src/syncMessage.c b/source/libs/sync/src/syncMessage.c index 13adaf055c1c320f56337ea90537e592fafcbe89..b42aba560fa1c26ef9426b55729c1d39cafa8a24 100644 --- a/source/libs/sync/src/syncMessage.c +++ b/source/libs/sync/src/syncMessage.c @@ -1947,6 +1947,8 @@ cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) { cJSON_AddNumberToObject(pRoot, "success", pMsg->success); snprintf(u64buf, sizeof(u64buf), "%" PRId64, pMsg->matchIndex); cJSON_AddStringToObject(pRoot, "matchIndex", u64buf); + snprintf(u64buf, sizeof(u64buf), "%" PRId64, pMsg->startTime); + cJSON_AddStringToObject(pRoot, "startTime", u64buf); } cJSON* pJson = cJSON_CreateObject(); diff --git a/source/libs/sync/src/syncRaftCfg.c b/source/libs/sync/src/syncRaftCfg.c index 5de21bceca99e13b3c2c33b72cd96f0ca6f86fa8..ab404d1b9af744b51b508cd1f870482c79756ea1 100644 --- a/source/libs/sync/src/syncRaftCfg.c +++ b/source/libs/sync/src/syncRaftCfg.c @@ -171,7 +171,7 @@ SRaftCfg *raftCfgOpen(const char *path) { taosLSeekFile(pCfg->pFile, 0, SEEK_SET); - char buf[1024] = {0}; + char buf[CONFIG_FILE_LEN] = {0}; int len = taosReadFile(pCfg->pFile, buf, sizeof(buf)); ASSERT(len > 0); diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c index 24f75de5d36b89cdbff84aa7846ff972259a882b..886f7ad199818dd81b42e07f14f4679f3a77ac1e 100644 --- a/source/libs/sync/src/syncReplication.c +++ b/source/libs/sync/src/syncReplication.c @@ -116,6 +116,120 @@ int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode) { return ret; } +int32_t syncNodeAppendEntriesOnePeer(SSyncNode* pSyncNode, SRaftId* pDestId, SyncIndex nextIndex) { + int32_t ret = 0; + + // pre index, pre term + SyncIndex preLogIndex = syncNodeGetPreIndex(pSyncNode, nextIndex); + SyncTerm preLogTerm = syncNodeGetPreTerm(pSyncNode, nextIndex); + if (preLogTerm == SYNC_TERM_INVALID) { + SyncIndex newNextIndex = syncNodeGetLastIndex(pSyncNode) + 1; + // SyncIndex newNextIndex = nextIndex + 1; + + syncIndexMgrSetIndex(pSyncNode->pNextIndex, pDestId, newNextIndex); + syncIndexMgrSetIndex(pSyncNode->pMatchIndex, pDestId, SYNC_INDEX_INVALID); + sError("vgId:%d, sync get pre term error, nextIndex:%" PRId64 ", update next-index:%" PRId64 + ", match-index:%d, raftid:%" PRId64, + pSyncNode->vgId, nextIndex, newNextIndex, SYNC_INDEX_INVALID, pDestId->addr); + return -1; + } + + // entry pointer array + SSyncRaftEntry* entryPArr[SYNC_MAX_BATCH_SIZE]; + memset(entryPArr, 0, sizeof(entryPArr)); + + // get entry batch + int32_t getCount = 0; + SyncIndex getEntryIndex = nextIndex; + for (int32_t i = 0; i < pSyncNode->pRaftCfg->batchSize; ++i) { + SSyncRaftEntry* pEntry = NULL; + int32_t code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, getEntryIndex, &pEntry); + if (code == 0) { + ASSERT(pEntry != NULL); + entryPArr[i] = pEntry; + getCount++; + getEntryIndex++; + + } else { + break; + } + } + + // event log + do { + char logBuf[128]; + char host[64]; + uint16_t port; + syncUtilU642Addr(pDestId->addr, host, sizeof(host), &port); + snprintf(logBuf, sizeof(logBuf), "build batch:%d for %s:%d", getCount, host, port); + syncNodeEventLog(pSyncNode, logBuf); + } while (0); + + // build msg + SyncAppendEntriesBatch* pMsg = syncAppendEntriesBatchBuild(entryPArr, getCount, pSyncNode->vgId); + ASSERT(pMsg != NULL); + + // free entries + for (int32_t i = 0; i < pSyncNode->pRaftCfg->batchSize; ++i) { + SSyncRaftEntry* pEntry = entryPArr[i]; + if (pEntry != NULL) { + syncEntryDestory(pEntry); + entryPArr[i] = NULL; + } + } + + // prepare msg + pMsg->srcId = pSyncNode->myRaftId; + pMsg->destId = *pDestId; + pMsg->term = pSyncNode->pRaftStore->currentTerm; + pMsg->prevLogIndex = preLogIndex; + pMsg->prevLogTerm = preLogTerm; + pMsg->commitIndex = pSyncNode->commitIndex; + pMsg->privateTerm = 0; + pMsg->dataCount = getCount; + + // send msg + syncNodeAppendEntriesBatch(pSyncNode, pDestId, pMsg); + + // speed up + if (pMsg->dataCount > 0 && pSyncNode->commitIndex - pMsg->prevLogIndex > SYNC_SLOW_DOWN_RANGE) { + ret = 1; + +#if 0 + do { + char logBuf[128]; + char host[64]; + uint16_t port; + syncUtilU642Addr(pDestId->addr, host, sizeof(host), &port); + snprintf(logBuf, sizeof(logBuf), "maybe speed up for %s:%d, pre-index:%ld", host, port, pMsg->prevLogIndex); + syncNodeEventLog(pSyncNode, logBuf); + } while (0); +#endif + } + + syncAppendEntriesBatchDestroy(pMsg); + + return ret; +} + +int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) { + if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) { + return -1; + } + + int32_t ret = 0; + for (int i = 0; i < pSyncNode->peersNum; ++i) { + SRaftId* pDestId = &(pSyncNode->peersId[i]); + + // next index + SyncIndex nextIndex = syncIndexMgrGetIndex(pSyncNode->pNextIndex, pDestId); + ret = syncNodeAppendEntriesOnePeer(pSyncNode, pDestId, nextIndex); + } + + return ret; +} + +#if 0 int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) { if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) { return -1; @@ -221,6 +335,7 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) { return ret; } +#endif int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode) { ASSERT(pSyncNode->state == TAOS_SYNC_STATE_LEADER); diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index 702e9f01dcb250b8c07c776865f8f274eff89f20..5489a107e76082106961a0ed107413e5ec9b4a64 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -24,7 +24,6 @@ //---------------------------------- static void snapshotSenderUpdateProgress(SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg); static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg); -static void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver); static void snapshotReceiverGotData(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg); static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg); @@ -374,14 +373,14 @@ cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender) { char *snapshotSender2Str(SSyncSnapshotSender *pSender) { cJSON *pJson = snapshotSender2Json(pSender); - char * serialized = cJSON_Print(pJson); + char *serialized = cJSON_Print(pJson); cJSON_Delete(pJson); return serialized; } char *snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event) { int32_t len = 256; - char * s = taosMemoryMalloc(len); + char *s = taosMemoryMalloc(len); SRaftId destId = pSender->pSyncNode->replicasId[pSender->replicaIndex]; char host[64]; @@ -480,7 +479,7 @@ static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncSnapsh } // force stop -static void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver) { +void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver) { // force close, abandon incomplete data if (pReceiver->pWriter != NULL) { int32_t ret = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, false, @@ -653,7 +652,7 @@ cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) { cJSON_AddStringToObject(pFromId, "addr", u64buf); { uint64_t u64 = pReceiver->fromId.addr; - cJSON * pTmp = pFromId; + cJSON *pTmp = pFromId; char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); @@ -686,14 +685,14 @@ cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) { char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver) { cJSON *pJson = snapshotReceiver2Json(pReceiver); - char * serialized = cJSON_Print(pJson); + char *serialized = cJSON_Print(pJson); cJSON_Delete(pJson); return serialized; } char *snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event) { int32_t len = 256; - char * s = taosMemoryMalloc(len); + char *s = taosMemoryMalloc(len); SRaftId fromId = pReceiver->fromId; char host[128]; diff --git a/source/libs/sync/test/syncAppendEntriesReplyTest.cpp b/source/libs/sync/test/syncAppendEntriesReplyTest.cpp index d41e99a3cd2e7d866ffd880a2664ad9df2d21ef6..72d3fd5ef34d83d9a2db7c674abd58a989bfed3e 100644 --- a/source/libs/sync/test/syncAppendEntriesReplyTest.cpp +++ b/source/libs/sync/test/syncAppendEntriesReplyTest.cpp @@ -24,6 +24,7 @@ SyncAppendEntriesReply *createMsg() { pMsg->matchIndex = 77; pMsg->term = 33; pMsg->privateTerm = 44; + pMsg->startTime = taosGetTimestampMs(); return pMsg; } @@ -89,6 +90,8 @@ void test5() { } int main() { + gRaftDetailLog = true; + tsAsyncLog = 0; sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE; logTest(); diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index 7a44edb12cddf5a386e3b77031920559d8b0a5e9..1480920f908e34bb7be5b95affe64619ac042289 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ b/source/libs/tdb/src/db/tdbBtree.c @@ -509,7 +509,7 @@ static int tdbBtreeBalanceDeeper(SBTree *pBt, SPage *pRoot, SPage **ppChild, TXN static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTxn) { int ret; - int nOlds; + int nOlds, pageIdx; SPage *pOlds[3] = {0}; SCell *pDivCell[3] = {0}; int szDivCell[3]; @@ -782,6 +782,11 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx pBt); tdbPageInsertCell(pParent, sIdx++, pNewCell, szNewCell, 0); tdbOsFree(pNewCell); + + if (TDB_CELLDECODER_FREE_VAL(&cd)) { + tdbFree(cd.pVal); + cd.pVal = NULL; + } } // move to next new page @@ -844,13 +849,11 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx } } - // TODO: here is not corrent for drop case - for (int i = 0; i < nNews; i++) { - if (i < nOlds) { - tdbPagerReturnPage(pBt->pPager, pOlds[i], pTxn); - } else { - tdbPagerReturnPage(pBt->pPager, pNews[i], pTxn); - } + for (pageIdx = 0; pageIdx < nOlds; ++pageIdx) { + tdbPagerReturnPage(pBt->pPager, pOlds[pageIdx], pTxn); + } + for (; pageIdx < nNews; ++pageIdx) { + tdbPagerReturnPage(pBt->pPager, pNews[pageIdx], pTxn); } return 0; @@ -934,6 +937,8 @@ static int tdbFetchOvflPage(SPgno *pPgno, SPage **ppOfp, TXN *pTxn, SBTree *pBt) return -1; } + tdbPCacheRelease(pBt->pPager->pCache, *ppOfp, pTxn); + return ret; } @@ -1277,6 +1282,8 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, nLeft -= bytes; memcpy(&pgno, ofpCell + bytes, sizeof(pgno)); + + tdbPCacheRelease(pBt->pPager->pCache, ofp, pTxn); } } else { int nLeftKey = kLen; @@ -1336,6 +1343,8 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, memcpy(&pgno, ofpCell + bytes, sizeof(pgno)); + tdbPCacheRelease(pBt->pPager->pCache, ofp, pTxn); + nLeftKey -= bytes; nLeft -= bytes; } @@ -1374,6 +1383,8 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, memcpy(&pgno, ofpCell + vLen - nLeft + bytes, sizeof(pgno)); + tdbPCacheRelease(pBt->pPager->pCache, ofp, pTxn); + nLeft -= bytes; } } @@ -1401,7 +1412,7 @@ static int tdbBtreeDecodeCell(SPage *pPage, const SCell *pCell, SCellDecoder *pD pDecoder->pgno = 0; TDB_CELLDECODER_SET_FREE_NIL(pDecoder); - tdbDebug("tdb btc decoder set nil: %p/0x%x ", pDecoder, pDecoder->freeKV); + // tdbTrace("tdb btc decoder set nil: %p/0x%x ", pDecoder, pDecoder->freeKV); // 1. Decode header part if (!leaf) { diff --git a/source/libs/tdb/src/db/tdbPCache.c b/source/libs/tdb/src/db/tdbPCache.c index 22229ea0e8407cf57a7e8aa3ed40ca47cf255c12..62541585911a5dfdc84c0d2fb84724c83efc5475 100644 --- a/source/libs/tdb/src/db/tdbPCache.c +++ b/source/libs/tdb/src/db/tdbPCache.c @@ -98,6 +98,7 @@ SPage *tdbPCacheFetch(SPCache *pCache, const SPgid *pPgid, TXN *pTxn) { // printf("thread %" PRId64 " fetch page %d pgno %d pPage %p nRef %d\n", taosGetSelfPthreadId(), pPage->id, // TDB_PAGE_PGNO(pPage), pPage, nRef); + tdbDebug("pcache/fetch page %p/%d/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id, nRef); return pPage; } @@ -111,6 +112,7 @@ void tdbPCacheRelease(SPCache *pCache, SPage *pPage, TXN *pTxn) { tdbPCacheLock(pCache); nRef = tdbUnrefPage(pPage); + tdbDebug("pcache/release page %p/%d/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id, nRef); if (nRef == 0) { // test the nRef again to make sure // it is safe th handle the page @@ -145,7 +147,7 @@ static SPage *tdbPCacheFetchImpl(SPCache *pCache, const SPgid *pPgid, TXN *pTxn) // 1. Search the hash table pPage = pCache->pgHash[tdbPCachePageHash(pPgid) % pCache->nHash]; while (pPage) { - if (memcmp(pPage->pgid.fileid, pPgid->fileid, TDB_FILE_ID_LEN) == 0 && pPage->pgid.pgno == pPgid->pgno) break; + if (pPage->pgid.pgno == pPgid->pgno && memcmp(pPage->pgid.fileid, pPgid->fileid, TDB_FILE_ID_LEN) == 0) break; pPage = pPage->pHashNext; } @@ -199,10 +201,21 @@ static SPage *tdbPCacheFetchImpl(SPCache *pCache, const SPgid *pPgid, TXN *pTxn) if (pPageH) { // copy the page content memcpy(&(pPage->pgid), pPgid, sizeof(*pPgid)); + + for (int nLoops = 0;;) { + if (pPageH->pPager) break; + if (++nLoops > 1000) { + sched_yield(); + nLoops = 0; + } + } + pPage->pLruNext = NULL; pPage->pPager = pPageH->pPager; memcpy(pPage->pData, pPageH->pData, pPage->pageSize); + tdbDebug("pcache/pPageH: %p %d %p %p %d", pPageH, pPageH->pPageHdr - pPageH->pData, pPageH->xCellSize, pPage, + TDB_PAGE_PGNO(pPageH)); tdbPageInit(pPage, pPageH->pPageHdr - pPageH->pData, pPageH->xCellSize); pPage->kLen = pPageH->kLen; pPage->vLen = pPageH->vLen; @@ -233,7 +246,7 @@ static void tdbPCachePinPage(SPCache *pCache, SPage *pPage) { pCache->nRecyclable--; // printf("pin page %d pgno %d pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage); - tdbTrace("pin page %d", pPage->id); + tdbDebug("pcache/pin page %p/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id); } } @@ -254,15 +267,14 @@ static void tdbPCacheUnpinPage(SPCache *pCache, SPage *pPage) { pCache->nRecyclable++; // printf("unpin page %d pgno %d pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage); - tdbTrace("unpin page %d", pPage->id); + tdbDebug("pcache/unpin page %p/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id); } static void tdbPCacheRemovePageFromHash(SPCache *pCache, SPage *pPage) { - SPage **ppPage; - uint32_t h; + uint32_t h = tdbPCachePageHash(&(pPage->pgid)) % pCache->nHash; - h = tdbPCachePageHash(&(pPage->pgid)); - for (ppPage = &(pCache->pgHash[h % pCache->nHash]); (*ppPage) && *ppPage != pPage; ppPage = &((*ppPage)->pHashNext)) + SPage **ppPage = &(pCache->pgHash[h]); + for (; (*ppPage) && *ppPage != pPage; ppPage = &((*ppPage)->pHashNext)) ; if (*ppPage) { @@ -271,13 +283,11 @@ static void tdbPCacheRemovePageFromHash(SPCache *pCache, SPage *pPage) { // printf("rmv page %d to hash, pgno %d, pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage); } - tdbTrace("remove page %d to hash", pPage->id); + tdbDebug("pcache/remove page %p/%d/%d from hash %" PRIu32, pPage, TDB_PAGE_PGNO(pPage), pPage->id, h); } static void tdbPCacheAddPageToHash(SPCache *pCache, SPage *pPage) { - int h; - - h = tdbPCachePageHash(&(pPage->pgid)) % pCache->nHash; + uint32_t h = tdbPCachePageHash(&(pPage->pgid)) % pCache->nHash; pPage->pHashNext = pCache->pgHash[h]; pCache->pgHash[h] = pPage; @@ -285,7 +295,7 @@ static void tdbPCacheAddPageToHash(SPCache *pCache, SPage *pPage) { pCache->nPage++; // printf("add page %d to hash, pgno %d, pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage); - tdbTrace("add page %d to hash", pPage->id); + tdbDebug("pcache/add page %p/%d/%d to hash %" PRIu32, pPage, TDB_PAGE_PGNO(pPage), pPage->id, h); } static int tdbPCacheOpenImpl(SPCache *pCache) { diff --git a/source/libs/tdb/src/db/tdbPage.c b/source/libs/tdb/src/db/tdbPage.c index 276b06b147586bbf18fe73f94cdb2592032d97e2..a3f376b929291780bdd57cbf99f5db6035e70aff 100644 --- a/source/libs/tdb/src/db/tdbPage.c +++ b/source/libs/tdb/src/db/tdbPage.c @@ -68,12 +68,15 @@ int tdbPageCreate(int pageSize, SPage **ppPage, void *(*xMalloc)(void *, size_t) } *ppPage = pPage; + + tdbDebug("page/create: %p %p", pPage, xMalloc); return 0; } int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg) { u8 *ptr; + tdbDebug("page/destroy: %p %p", pPage, xFree); ASSERT(xFree); for (int iOvfl = 0; iOvfl < pPage->nOverflow; iOvfl++) { @@ -87,6 +90,7 @@ int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg) } void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt)) { + tdbDebug("page/zero: %p %" PRIu8 " %p", pPage, szAmHdr, xCellSize); pPage->pPageHdr = pPage->pData + szAmHdr; TDB_PAGE_NCELLS_SET(pPage, 0); TDB_PAGE_CCELLS_SET(pPage, pPage->pageSize - sizeof(SPageFtr)); @@ -103,6 +107,7 @@ void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell } void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt)) { + tdbDebug("page/init: %p %" PRIu8 " %p", pPage, szAmHdr, xCellSize); pPage->pPageHdr = pPage->pData + szAmHdr; pPage->pCellIdx = pPage->pPageHdr + TDB_PAGE_HDR_SIZE(pPage); pPage->pFreeStart = pPage->pCellIdx + TDB_PAGE_OFFSET_SIZE(pPage) * TDB_PAGE_NCELLS(pPage); diff --git a/source/libs/tdb/src/db/tdbPager.c b/source/libs/tdb/src/db/tdbPager.c index 4de99e8b1bde34c7f6583d0aedc205074d7c1cca..f90c39278888c7838b0c4b1b5b434e4c06fb30a0 100644 --- a/source/libs/tdb/src/db/tdbPager.c +++ b/source/libs/tdb/src/db/tdbPager.c @@ -166,6 +166,7 @@ int tdbPagerWrite(SPager *pPager, SPage *pPage) { // ref page one more time so the page will not be release tdbRefPage(pPage); + tdbDebug("pcache/mdirty page %p/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id); // Set page as dirty pPage->isDirty = 1; diff --git a/source/libs/tdb/src/inc/tdbInt.h b/source/libs/tdb/src/inc/tdbInt.h index 49126b80b6e5dd11f30a7cddf581f42994db7bec..6a694cf8f1f8cce95e7fa9373e2aa2c01128a6d9 100644 --- a/source/libs/tdb/src/inc/tdbInt.h +++ b/source/libs/tdb/src/inc/tdbInt.h @@ -280,13 +280,13 @@ struct SPage { static inline i32 tdbRefPage(SPage *pPage) { i32 nRef = atomic_add_fetch_32(&((pPage)->nRef), 1); - tdbTrace("ref page %d, nRef %d", pPage->id, nRef); + tdbTrace("ref page %p/%d, nRef %d", pPage, pPage->id, nRef); return nRef; } static inline i32 tdbUnrefPage(SPage *pPage) { i32 nRef = atomic_sub_fetch_32(&((pPage)->nRef), 1); - tdbTrace("unref page %d, nRef %d", pPage->id, nRef); + tdbTrace("unref page %p/%d, nRef %d", pPage, pPage->id, nRef); return nRef; } diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h index 04b58da570f81aba36766d3ce9795742c12bc3b4..c6f3066be72c289b3e2bd57d1b4b995c15fc7dac 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -98,6 +98,17 @@ typedef void* queue[2]; #define TRANS_RETRY_INTERVAL 15 // retry interval (ms) #define TRANS_CONN_TIMEOUT 3 // connect timeout (s) #define TRANS_READ_TIMEOUT 3000 // read timeout (ms) +#define TRANS_PACKET_LIMIT 1024 * 1024 * 512 + +#define TRANS_MAGIC_NUM 0x5f375a86 + +#define TRANS_NOVALID_PACKET(src) ((src) != TRANS_MAGIC_NUM ? 1 : 0) + +#define TRANS_PACKET_LIMIT 1024 * 1024 * 512 + +#define TRANS_MAGIC_NUM 0x5f375a86 + +#define TRANS_NOVALID_PACKET(src) ((src) != TRANS_MAGIC_NUM ? 1 : 0) typedef SRpcMsg STransMsg; typedef SRpcCtx STransCtx; @@ -151,6 +162,7 @@ typedef struct { char hasEpSet : 2; // contain epset or not, 0(default): no epset, 1: contain epset char user[TSDB_UNI_LEN]; + uint32_t magicNum; STraceId traceId; uint64_t ahandle; // ahandle assigned by client uint32_t code; // del later @@ -203,6 +215,7 @@ typedef struct SConnBuffer { int cap; int left; int total; + int invalid; } SConnBuffer; typedef void (*AsyncCB)(uv_async_t* handle); @@ -293,7 +306,7 @@ int transSendResponse(const STransMsg* msg); int transRegisterMsg(const STransMsg* msg); int transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn); -int transGetSockDebugInfo(struct sockaddr* sockname, char* dst); +int transSockInfo2Str(struct sockaddr* sockname, char* dst); int64_t transAllocHandle(); diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c index 62277a7569a836f84e2ea143dc648737c208b3c8..e880a5abdba4cc36383e93ec51214e58fe69c8a6 100644 --- a/source/libs/transport/src/thttp.c +++ b/source/libs/transport/src/thttp.c @@ -14,15 +14,26 @@ */ #define _DEFAULT_SOURCE -#ifdef USE_UV -#include -#endif // clang-format off +#include #include "zlib.h" #include "thttp.h" #include "taoserror.h" #include "tlog.h" + +#define HTTP_RECV_BUF_SIZE 1024 + +typedef struct SHttpClient { + uv_connect_t conn; + uv_tcp_t tcp; + uv_write_t req; + uv_buf_t* wbuf; + char *rbuf; + char* addr; + uint16_t port; +} SHttpClient; + static int32_t taosBuildHttpHeader(const char* server, int32_t contLen, char* pHead, int32_t headLen, EHttpCompFlag flag) { if (flag == HTTP_FLAT) { @@ -45,7 +56,7 @@ static int32_t taosBuildHttpHeader(const char* server, int32_t contLen, char* pH } } -int32_t taosCompressHttpRport(char* pSrc, int32_t srcLen) { +static int32_t taosCompressHttpRport(char* pSrc, int32_t srcLen) { int32_t code = -1; int32_t destLen = srcLen; void* pDest = taosMemoryMalloc(destLen); @@ -114,84 +125,71 @@ _OVER: return code; } -#ifdef USE_UV +static void destroyHttpClient(SHttpClient* cli) { + taosMemoryFree(cli->wbuf); + taosMemoryFree(cli->rbuf); + taosMemoryFree(cli->addr); + taosMemoryFree(cli); + +} +static void clientCloseCb(uv_handle_t* handle) { + SHttpClient* cli = handle->data; + destroyHttpClient(cli); +} +static void clientAllocBuffCb(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) { + SHttpClient* cli = handle->data; + buf->base = cli->rbuf; + buf->len = HTTP_RECV_BUF_SIZE; +} +static void clientRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t *buf) { + SHttpClient* cli = handle->data; + if (nread < 0) { + uError("http-report recv error:%s", uv_err_name(nread)); + } else { + uTrace("http-report succ to recv %d bytes, just ignore it", nread); + } + uv_close((uv_handle_t*)&cli->tcp, clientCloseCb); +} +static void clientSentCb(uv_write_t* req, int32_t status) { + SHttpClient* cli = req->data; + if (status != 0) { + terrno = TAOS_SYSTEM_ERROR(status); + uError("http-report failed to send data %s", uv_strerror(status)); + uv_close((uv_handle_t*)&cli->tcp, clientCloseCb); + return; + } else { + uTrace("http-report succ to send data"); + } + uv_read_start((uv_stream_t *)&cli->tcp, clientAllocBuffCb, clientRecvCb); +} static void clientConnCb(uv_connect_t* req, int32_t status) { - if (status < 0) { + SHttpClient* cli = req->data; + if (status != 0) { terrno = TAOS_SYSTEM_ERROR(status); - uError("connection error %s", uv_strerror(status)); - uv_close((uv_handle_t*)req->handle, NULL); + uError("http-report failed to conn to server, reason:%s, dst:%s:%d", uv_strerror(status), cli->addr, cli->port); + uv_close((uv_handle_t*)&cli->tcp, clientCloseCb); return; } - uv_buf_t* wb = req->data; - assert(wb != NULL); - uv_write_t write_req; - uv_write(&write_req, req->handle, wb, 2, NULL); - uv_close((uv_handle_t*)req->handle, NULL); + uv_write(&cli->req, (uv_stream_t*)&cli->tcp, cli->wbuf, 2, clientSentCb); } -int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag) { - uint32_t ipv4 = taosGetIpv4FromFqdn(server); - if (ipv4 == 0xffffffff) { +static int32_t taosBuildDstAddr(const char* server, uint16_t port, struct sockaddr_in* dest) { + uint32_t ip = taosGetIpv4FromFqdn(server); + if (ip == 0xffffffff) { terrno = TAOS_SYSTEM_ERROR(errno); - uError("failed to get http server:%s ip since %s", server, terrstr()); + uError("http-report failed to get http server:%s since %s", server, errno == 0 ? "invalid http server" : terrstr()); return -1; } - - char ipv4Buf[128] = {0}; - tinet_ntoa(ipv4Buf, ipv4); - - struct sockaddr_in dest = {0}; - uv_ip4_addr(ipv4Buf, port, &dest); - - uv_tcp_t socket_tcp = {0}; - uv_loop_t* loop = uv_default_loop(); - uv_tcp_init(loop, &socket_tcp); - uv_connect_t* connect = (uv_connect_t*)taosMemoryMalloc(sizeof(uv_connect_t)); - - if (flag == HTTP_GZIP) { - int32_t dstLen = taosCompressHttpRport(pCont, contLen); - if (dstLen > 0) { - contLen = dstLen; - } else { - flag = HTTP_FLAT; - } - } - - char header[1024] = {0}; - int32_t headLen = taosBuildHttpHeader(server, contLen, header, sizeof(header), flag); - - uv_buf_t wb[2]; - wb[0] = uv_buf_init((char*)header, headLen); - wb[1] = uv_buf_init((char*)pCont, contLen); - - connect->data = wb; - terrno = 0; - uv_tcp_connect(connect, &socket_tcp, (const struct sockaddr*)&dest, clientConnCb); - uv_run(loop, UV_RUN_DEFAULT); - uv_loop_close(loop); - taosMemoryFree(connect); - return terrno; + char buf[128] = {0}; + tinet_ntoa(buf, ip); + uv_ip4_addr(buf, port, dest); + return 0; } - -#else int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag) { - int32_t code = -1; - TdSocketPtr pSocket = NULL; - - uint32_t ip = taosGetIpv4FromFqdn(server); - if (ip == 0xffffffff) { - terrno = TAOS_SYSTEM_ERROR(errno); - uError("failed to get http server:%s ip since %s", server, terrstr()); - goto SEND_OVER; - } - - pSocket = taosOpenTcpClientSocket(ip, port, 0); - if (pSocket == NULL) { - terrno = TAOS_SYSTEM_ERROR(errno); - uError("failed to create http socket to %s:%u since %s", server, port, terrstr()); - goto SEND_OVER; + struct sockaddr_in dest = {0}; + if (taosBuildDstAddr(server, port, &dest) < 0) { + return -1; } - if (flag == HTTP_GZIP) { int32_t dstLen = taosCompressHttpRport(pCont, contLen); if (dstLen > 0) { @@ -200,37 +198,39 @@ int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32 flag = HTTP_FLAT; } } + terrno = 0; - char header[1024] = {0}; + char header[2048] = {0}; int32_t headLen = taosBuildHttpHeader(server, contLen, header, sizeof(header), flag); - if (taosWriteMsg(pSocket, header, headLen) < 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - uError("failed to send http header to %s:%u since %s", server, port, terrstr()); - goto SEND_OVER; - } - if (taosWriteMsg(pSocket, (void*)pCont, contLen) < 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - uError("failed to send http content to %s:%u since %s", server, port, terrstr()); - goto SEND_OVER; - } + uv_buf_t* wb = taosMemoryCalloc(2, sizeof(uv_buf_t)); + wb[0] = uv_buf_init((char*)header, headLen); // stack var + wb[1] = uv_buf_init((char*)pCont, contLen); // heap var - // read something to avoid nginx error 499 - if (taosWriteMsg(pSocket, header, 10) < 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - uError("failed to receive response from %s:%u since %s", server, port, terrstr()); - goto SEND_OVER; - } + SHttpClient* cli = taosMemoryCalloc(1, sizeof(SHttpClient)); + cli->conn.data = cli; + cli->tcp.data = cli; + cli->req.data = cli; + cli->wbuf = wb; + cli->rbuf = taosMemoryCalloc(1, HTTP_RECV_BUF_SIZE); + cli->addr = tstrdup(server); + cli->port = port; - code = 0; + uv_loop_t* loop = uv_default_loop(); + uv_tcp_init(loop, &cli->tcp); + // set up timeout to avoid stuck; + int32_t fd = taosCreateSocketWithTimeout(5); + uv_tcp_open((uv_tcp_t*)&cli->tcp, fd); -SEND_OVER: - if (pSocket != NULL) { - taosCloseSocket(&pSocket); + int32_t ret = uv_tcp_connect(&cli->conn, &cli->tcp, (const struct sockaddr*)&dest, clientConnCb); + if (ret != 0) { + uError("http-report failed to connect to server, reason:%s, dst:%s:%d", uv_strerror(ret), cli->addr, cli->port); + destroyHttpClient(cli); + uv_stop(loop); } - return code; + uv_run(loop, UV_RUN_DEFAULT); + uv_loop_close(loop); + return terrno; } - // clang-format on -#endif diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 9eea43be2354e5407a31a7bd10d9f215d8dd0cb5..41688c733079f12fbd04683183dd80db3b65606d 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -16,7 +16,7 @@ #include "transComm.h" typedef struct SConnList { - queue conn; + queue conns; int32_t size; } SConnList; @@ -103,30 +103,24 @@ static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port); static void addConnToPool(void* pool, SCliConn* conn); static void doCloseIdleConn(void* param); -static int sockDebugInfo(struct sockaddr* sockname, char* dst) { - struct sockaddr_in addr = *(struct sockaddr_in*)sockname; - - char buf[16] = {0}; - int r = uv_ip4_name(&addr, (char*)buf, sizeof(buf)); - sprintf(dst, "%s:%d", buf, ntohs(addr.sin_port)); - return r; -} // register timer for read static void cliReadTimeoutCb(uv_timer_t* handle); // register timer in each thread to clear expire conn // static void cliTimeoutCb(uv_timer_t* handle); -// alloc buf for recv +// alloc buffer for recv static void cliAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf); -// callback after read nbytes from socket +// callback after recv nbytes from socket static void cliRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf); -// callback after write data to socket +// callback after send data to socket static void cliSendCb(uv_write_t* req, int status); -// callback after conn to server +// callback after conn to server static void cliConnCb(uv_connect_t* req, int status); static void cliAsyncCb(uv_async_t* handle); static void cliIdleCb(uv_idle_t* handle); static void cliPrepareCb(uv_prepare_t* handle); +static bool cliRecvReleaseReq(SCliConn* conn, STransMsgHead* pHead); + static int32_t allocConnRef(SCliConn* conn, bool update); static int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg); @@ -135,19 +129,14 @@ static SCliConn* cliCreateConn(SCliThrd* thrd); static void cliDestroyConn(SCliConn* pConn, bool clear /*clear tcp handle or not*/); static void cliDestroy(uv_handle_t* handle); static void cliSend(SCliConn* pConn); +static void cliDestroyConnMsgs(SCliConn* conn, bool destroy); -static bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx) { - if (code != 0) return false; - if (pCtx->retryCnt == 0) return false; - if (transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) return false; - return true; -} +// cli util func +static bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx); +static void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr); + +static int32_t cliBuildExceptResp(SCliMsg* pMsg, STransMsg* resp); -void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr); -/* - * set TCP connection timeout per-socket level - */ -static int cliCreateSocket(); // process data read from server, add decompress etc later static void cliHandleResp(SCliConn* conn); // handle except about conn @@ -175,15 +164,14 @@ static void destroyThrdObj(SCliThrd* pThrd); static void cliWalkCb(uv_handle_t* handle, void* arg); static void cliReleaseUnfinishedMsg(SCliConn* conn) { - SCliMsg* pMsg = NULL; for (int i = 0; i < transQueueSize(&conn->cliMsgs); i++) { - pMsg = transQueueGet(&conn->cliMsgs, i); - if (pMsg != NULL && pMsg->ctx != NULL) { - if (conn->ctx.freeFunc != NULL) { - conn->ctx.freeFunc(pMsg->ctx->ahandle); + SCliMsg* msg = transQueueGet(&conn->cliMsgs, i); + if (msg != NULL && msg->ctx != NULL) { + if (conn->ctx.freeFunc != NULL && msg->ctx->ahandle != NULL) { + conn->ctx.freeFunc(msg->ctx->ahandle); } } - destroyCmsg(pMsg); + destroyCmsg(msg); } } #define CLI_RELEASE_UV(loop) \ @@ -211,28 +199,6 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) { #define CONN_PERSIST_TIME(para) ((para) <= 90000 ? 90000 : (para)) #define CONN_GET_HOST_THREAD(conn) (conn ? ((SCliConn*)conn)->hostThrd : NULL) #define CONN_GET_INST_LABEL(conn) (((STrans*)(((SCliThrd*)(conn)->hostThrd)->pTransInst))->label) -#define CONN_SHOULD_RELEASE(conn, head) \ - do { \ - if ((head)->release == 1 && (head->msgLen) == sizeof(*head)) { \ - uint64_t ahandle = head->ahandle; \ - CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle); \ - transClearBuffer(&conn->readBuf); \ - transFreeMsg(transContFromHead((char*)head)); \ - if (transQueueSize(&conn->cliMsgs) > 0 && ahandle == 0) { \ - SCliMsg* cliMsg = transQueueGet(&conn->cliMsgs, 0); \ - if (cliMsg->type == Release) return; \ - } \ - tDebug("%s conn %p receive release request, refId:%" PRId64 "", CONN_GET_INST_LABEL(conn), conn, conn->refId); \ - if (T_REF_VAL_GET(conn) > 1) { \ - transUnrefCliHandle(conn); \ - } \ - destroyCmsg(pMsg); \ - cliReleaseUnfinishedMsg(conn); \ - transQueueClear(&conn->cliMsgs); \ - addConnToPool(((SCliThrd*)conn->hostThrd)->pool, conn); \ - return; \ - } \ - } while (0) #define CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle) \ do { \ @@ -245,8 +211,10 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) { } \ if (i == sz) { \ pMsg = NULL; \ + tDebug("msg not found, %" PRIu64 "", ahandle); \ } else { \ pMsg = transQueueRm(&conn->cliMsgs, i); \ + tDebug("msg found, %" PRIu64 "", ahandle); \ } \ } while (0) #define CONN_GET_NEXT_SENDMSG(conn) \ @@ -346,10 +314,17 @@ void cliHandleResp(SCliConn* conn) { } STransMsgHead* pHead = NULL; - transDumpFromBuffer(&conn->readBuf, (char**)&pHead); + if (transDumpFromBuffer(&conn->readBuf, (char**)&pHead) <= 0) { + tDebug("%s conn %p recv invalid packet ", CONN_GET_INST_LABEL(conn), conn); + return; + } pHead->code = htonl(pHead->code); pHead->msgLen = htonl(pHead->msgLen); + if (cliRecvReleaseReq(conn, pHead)) { + return; + } + STransMsg transMsg = {0}; transMsg.contLen = transContLenFromMsg(pHead->msgLen); transMsg.pCont = transContFromHead((char*)pHead); @@ -361,8 +336,6 @@ void cliHandleResp(SCliConn* conn) { SCliMsg* pMsg = NULL; STransConnCtx* pCtx = NULL; - CONN_SHOULD_RELEASE(conn, pHead); - if (CONN_NO_PERSIST_BY_APP(conn)) { pMsg = transQueuePop(&conn->cliMsgs); @@ -383,7 +356,7 @@ void cliHandleResp(SCliConn* conn) { transMsg.info.ahandle); } } else { - pCtx = pMsg ? pMsg->ctx : NULL; + pCtx = pMsg->ctx; transMsg.info.ahandle = pCtx ? pCtx->ahandle : NULL; tDebug("%s conn %p get ahandle %p, persist: 1", CONN_GET_INST_LABEL(conn), conn, transMsg.info.ahandle); } @@ -395,7 +368,6 @@ void cliHandleResp(SCliConn* conn) { } STraceId* trace = &transMsg.info.traceId; - tGDebug("%s conn %p %s received from %s, local info:%s, len:%d, code str:%s", CONN_GET_INST_LABEL(conn), conn, TMSG_INFO(pHead->msgType), conn->dst, conn->src, transMsg.contLen, tstrerror(transMsg.code)); @@ -494,8 +466,8 @@ void* createConnPool(int size) { void* destroyConnPool(void* pool) { SConnList* connList = taosHashIterate((SHashObj*)pool, NULL); while (connList != NULL) { - while (!QUEUE_IS_EMPTY(&connList->conn)) { - queue* h = QUEUE_HEAD(&connList->conn); + while (!QUEUE_IS_EMPTY(&connList->conns)) { + queue* h = QUEUE_HEAD(&connList->conns); SCliConn* c = QUEUE_DATA(h, SCliConn, q); cliDestroyConn(c, true); } @@ -508,21 +480,21 @@ void* destroyConnPool(void* pool) { static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port) { char key[32] = {0}; CONN_CONSTRUCT_HASH_KEY(key, ip, port); - SHashObj* pPool = pool; - SConnList* plist = taosHashGet(pPool, key, strlen(key)); + + SConnList* plist = taosHashGet((SHashObj*)pool, key, strlen(key)); if (plist == NULL) { SConnList list = {0}; - taosHashPut(pPool, key, strlen(key), (void*)&list, sizeof(list)); - plist = taosHashGet(pPool, key, strlen(key)); - QUEUE_INIT(&plist->conn); + taosHashPut((SHashObj*)pool, key, strlen(key), (void*)&list, sizeof(list)); + plist = taosHashGet((SHashObj*)pool, key, strlen(key)); + QUEUE_INIT(&plist->conns); } - if (QUEUE_IS_EMPTY(&plist->conn)) { + if (QUEUE_IS_EMPTY(&plist->conns)) { return NULL; } plist->size -= 1; - queue* h = QUEUE_HEAD(&plist->conn); + queue* h = QUEUE_HEAD(&plist->conns); SCliConn* conn = QUEUE_DATA(h, SCliConn, q); conn->status = ConnNormal; QUEUE_REMOVE(&conn->q); @@ -538,22 +510,21 @@ static void addConnToPool(void* pool, SCliConn* conn) { if (conn->status == ConnInPool) { return; } - SCliThrd* thrd = conn->hostThrd; - CONN_HANDLE_THREAD_QUIT(thrd); - allocConnRef(conn, true); + SCliThrd* thrd = conn->hostThrd; if (conn->timer != NULL) { uv_timer_stop(conn->timer); taosArrayPush(thrd->timerList, &conn->timer); conn->timer->data = NULL; conn->timer = NULL; } + if (T_REF_VAL_GET(conn) > 1) { + transUnrefCliHandle(conn); + } + + cliDestroyConnMsgs(conn, false); - STrans* pTransInst = thrd->pTransInst; - cliReleaseUnfinishedMsg(conn); - transQueueClear(&conn->cliMsgs); - transCtxCleanup(&conn->ctx); conn->status = ConnInPool; if (conn->list == NULL) { @@ -564,18 +535,15 @@ static void addConnToPool(void* pool, SCliConn* conn) { } else { tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap); } - assert(conn->list != NULL); - QUEUE_INIT(&conn->q); - QUEUE_PUSH(&conn->list->conn, &conn->q); + QUEUE_PUSH(&conn->list->conns, &conn->q); conn->list->size += 1; - conn->task = NULL; - assert(!QUEUE_IS_EMPTY(&conn->list->conn)); - if (conn->list->size >= 50) { STaskArg* arg = taosMemoryCalloc(1, sizeof(STaskArg)); arg->param1 = conn; arg->param2 = thrd; + + STrans* pTransInst = thrd->pTransInst; conn->task = transDQSched(thrd->timeoutQueue, doCloseIdleConn, arg, CONN_PERSIST_TIME(pTransInst->idleTime)); } } @@ -625,7 +593,12 @@ static void cliRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf) { pBuf->len += nread; while (transReadComplete(pBuf)) { tTrace("%s conn %p read complete", CONN_GET_INST_LABEL(conn), conn); - cliHandleResp(conn); + if (pBuf->invalid) { + cliHandleExcept(conn); + break; + } else { + cliHandleResp(conn); + } } return; } @@ -710,11 +683,10 @@ static void cliDestroy(uv_handle_t* handle) { transRemoveExHandle(transGetRefMgt(), conn->refId); taosMemoryFree(conn->ip); - conn->stream->data = NULL; taosMemoryFree(conn->stream); - transCtxCleanup(&conn->ctx); - cliReleaseUnfinishedMsg(conn); - transQueueDestroy(&conn->cliMsgs); + + cliDestroyConnMsgs(conn, true); + tTrace("%s conn %p destroy successfully", CONN_GET_INST_LABEL(conn), conn); transReqQueueClear(&conn->wreqQueue); transDestroyBuffer(&conn->readBuf); @@ -757,8 +729,6 @@ static void cliSendCb(uv_write_t* req, int status) { } void cliSend(SCliConn* pConn) { - CONN_HANDLE_BROKEN(pConn); - assert(!transQueueEmpty(&pConn->cliMsgs)); SCliMsg* pCliMsg = NULL; @@ -775,8 +745,8 @@ void cliSend(SCliConn* pConn) { pMsg->pCont = (void*)rpcMallocCont(0); pMsg->contLen = 0; } - int msgLen = transMsgLenFromCont(pMsg->contLen); + int msgLen = transMsgLenFromCont(pMsg->contLen); STransMsgHead* pHead = transHeadFromCont(pMsg->pCont); pHead->ahandle = pCtx != NULL ? (uint64_t)pCtx->ahandle : 0; pHead->noResp = REQUEST_NO_RESP(pMsg) ? 1 : 0; @@ -786,8 +756,7 @@ void cliSend(SCliConn* pConn) { pHead->release = REQUEST_RELEASE_HANDLE(pCliMsg) ? 1 : 0; memcpy(pHead->user, pTransInst->user, strlen(pTransInst->user)); pHead->traceId = pMsg->info.traceId; - - uv_buf_t wb = uv_buf_init((char*)pHead, msgLen); + pHead->magicNum = htonl(TRANS_MAGIC_NUM); STraceId* trace = &pMsg->info.traceId; tGDebug("%s conn %p %s is sent to %s, local info %s, len:%d", CONN_GET_INST_LABEL(pConn), pConn, @@ -810,6 +779,8 @@ void cliSend(SCliConn* pConn) { tGTrace("%s conn %p start timer for msg:%s", CONN_GET_INST_LABEL(pConn), pConn, TMSG_INFO(pMsg->msgType)); uv_timer_start((uv_timer_t*)pConn->timer, cliReadTimeoutCb, TRANS_READ_TIMEOUT, 0); } + + uv_buf_t wb = uv_buf_init((char*)pHead, msgLen); uv_write_t* req = transReqQueuePush(&pConn->wreqQueue); uv_write(req, (uv_stream_t*)pConn->stream, &wb, 1, cliSendCb); return; @@ -825,16 +796,15 @@ void cliConnCb(uv_connect_t* req, int status) { cliHandleExcept(pConn); return; } - // int addrlen = sizeof(pConn->addr); struct sockaddr peername, sockname; int addrlen = sizeof(peername); uv_tcp_getpeername((uv_tcp_t*)pConn->stream, &peername, &addrlen); - transGetSockDebugInfo(&peername, pConn->dst); + transSockInfo2Str(&peername, pConn->dst); addrlen = sizeof(sockname); uv_tcp_getsockname((uv_tcp_t*)pConn->stream, &sockname, &addrlen); - transGetSockDebugInfo(&sockname, pConn->src); + transSockInfo2Str(&sockname, pConn->src); tTrace("%s conn %p connect to server successfully", CONN_GET_INST_LABEL(pConn), pConn); assert(pConn->stream == req->handle); @@ -858,7 +828,7 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd) { int64_t refId = (int64_t)(pMsg->msg.info.handle); SExHandle* exh = transAcquireExHandle(transGetRefMgt(), refId); if (exh == NULL) { - tDebug("%" PRId64 " already release", refId); + tDebug("%" PRId64 " already released", refId); destroyCmsg(pMsg); return; } @@ -874,6 +844,9 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd) { return; } cliSend(conn); + } else { + tError("%s conn %p already released", CONN_GET_INST_LABEL(conn), conn); + destroyCmsg(pMsg); } } static void cliHandleUpdate(SCliMsg* pMsg, SCliThrd* pThrd) { @@ -923,6 +896,27 @@ void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr) { } } } + +bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx) { + if (code != 0) return false; + if (pCtx->retryCnt == 0) return false; + if (transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) return false; + return true; +} + +int32_t cliBuildExceptResp(SCliMsg* pMsg, STransMsg* pResp) { + if (pMsg == NULL) return -1; + + memset(pResp, 0, sizeof(STransMsg)); + + pResp->code = TSDB_CODE_RPC_BROKEN_LINK; + pResp->msgType = pMsg->msg.msgType + 1; + pResp->info.ahandle = pMsg->ctx ? pMsg->ctx->ahandle : NULL; + pResp->info.traceId = pMsg->msg.info.traceId; + + return 0; +} + void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { STrans* pTransInst = pThrd->pTransInst; STransConnCtx* pCtx = pMsg->ctx; @@ -938,13 +932,8 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { SCliConn* conn = cliGetConn(pMsg, pThrd, &ignore); if (ignore == true) { // persist conn already release by server - STransMsg resp = {0}; - resp.code = TSDB_CODE_RPC_BROKEN_LINK; - resp.msgType = pMsg->msg.msgType + 1; - - resp.info.ahandle = pMsg && pMsg->ctx ? pMsg->ctx->ahandle : NULL; - resp.info.traceId = pMsg->msg.info.traceId; - + STransMsg resp; + cliBuildExceptResp(pMsg, &resp); pTransInst->cfp(pTransInst->parent, &resp, NULL); destroyCmsg(pMsg); return; @@ -1009,9 +998,6 @@ static void cliAsyncCb(uv_async_t* handle) { QUEUE_REMOVE(h); SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q); - if (pMsg == NULL) { - continue; - } (*cliAsyncHandle[pMsg->type])(pMsg, pThrd); count++; } @@ -1053,6 +1039,64 @@ static void cliPrepareCb(uv_prepare_t* handle) { if (thrd->stopMsg != NULL) cliHandleQuit(thrd->stopMsg, thrd); } +void cliDestroyConnMsgs(SCliConn* conn, bool destroy) { + transCtxCleanup(&conn->ctx); + cliReleaseUnfinishedMsg(conn); + if (destroy == 1) { + transQueueDestroy(&conn->cliMsgs); + } else { + transQueueClear(&conn->cliMsgs); + } +} + +void cliIteraConnMsgs(SCliConn* conn) { + SCliThrd* pThrd = conn->hostThrd; + STrans* pTransInst = pThrd->pTransInst; + + for (int i = 0; i < transQueueSize(&conn->cliMsgs); i++) { + SCliMsg* cmsg = transQueueGet(&conn->cliMsgs, i); + if (cmsg->type == Release || REQUEST_NO_RESP(&cmsg->msg) || cmsg->msg.msgType == TDMT_SCH_DROP_TASK) { + continue; + } + + STransMsg resp = {0}; + if (-1 == cliBuildExceptResp(cmsg, &resp)) { + continue; + } + pTransInst->cfp(pTransInst->parent, &resp, NULL); + + cmsg->ctx->ahandle = NULL; + } +} +bool cliRecvReleaseReq(SCliConn* conn, STransMsgHead* pHead) { + if (pHead->release == 1 && (pHead->msgLen) == sizeof(*pHead)) { + uint64_t ahandle = pHead->ahandle; + tDebug("ahandle = %" PRIu64 "", ahandle); + SCliMsg* pMsg = NULL; + CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle); + + transClearBuffer(&conn->readBuf); + transFreeMsg(transContFromHead((char*)pHead)); + + for (int i = 0; ahandle == 0 && i < transQueueSize(&conn->cliMsgs); i++) { + SCliMsg* cliMsg = transQueueGet(&conn->cliMsgs, i); + if (cliMsg->type == Release) { + assert(pMsg == NULL); + return true; + } + } + + cliIteraConnMsgs(conn); + + tDebug("%s conn %p receive release request, refId:%" PRId64 "", CONN_GET_INST_LABEL(conn), conn, conn->refId); + destroyCmsg(pMsg); + + addConnToPool(((SCliThrd*)conn->hostThrd)->pool, conn); + return true; + } + return false; +} + static void* cliWorkThread(void* arg) { SCliThrd* pThrd = (SCliThrd*)arg; pThrd->pid = taosGetSelfPthreadId(); @@ -1398,7 +1442,7 @@ int transReleaseCliHandle(void* handle) { } STransMsg tmsg = {.info.handle = handle}; - // TRACE_SET_MSGID(&tmsg.info.traceId, tGenIdPI64()); + TRACE_SET_MSGID(&tmsg.info.traceId, tGenIdPI64()); SCliMsg* cmsg = taosMemoryCalloc(1, sizeof(SCliMsg)); cmsg->msg = tmsg; @@ -1426,7 +1470,7 @@ int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STran if (pThrd == NULL && valid == false) { transFreeMsg(pReq->pCont); transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); - return -1; + return TSDB_CODE_RPC_BROKEN_LINK; } TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64()); @@ -1471,7 +1515,7 @@ int transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransMs if (pThrd == NULL && valid == false) { transFreeMsg(pReq->pCont); transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); - return -1; + return TSDB_CODE_RPC_BROKEN_LINK; } tsem_t* sem = taosMemoryCalloc(1, sizeof(tsem_t)); diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c index b568163e23ec75dfa626c2c3618391e94c838d2e..a4d679b281512ff13757eab7c9c42a11e0edb36b 100644 --- a/source/libs/transport/src/transComm.c +++ b/source/libs/transport/src/transComm.c @@ -77,7 +77,7 @@ void transFreeMsg(void* msg) { } taosMemoryFree((char*)msg - sizeof(STransMsgHead)); } -int transGetSockDebugInfo(struct sockaddr* sockname, char* dst) { +int transSockInfo2Str(struct sockaddr* sockname, char* dst) { struct sockaddr_in addr = *(struct sockaddr_in*)sockname; char buf[20] = {0}; @@ -91,6 +91,7 @@ int transInitBuffer(SConnBuffer* buf) { buf->left = -1; buf->len = 0; buf->total = 0; + buf->invalid = 0; return 0; } int transDestroyBuffer(SConnBuffer* p) { @@ -108,19 +109,25 @@ int transClearBuffer(SConnBuffer* buf) { p->left = -1; p->len = 0; p->total = 0; + p->invalid = 0; return 0; } int transDumpFromBuffer(SConnBuffer* connBuf, char** buf) { + static const int HEADSIZE = sizeof(STransMsgHead); + SConnBuffer* p = connBuf; if (p->left != 0) { return -1; } int total = connBuf->total; - *buf = taosMemoryCalloc(1, total); - memcpy(*buf, p->buf, total); - - transResetBuffer(connBuf); + if (total >= HEADSIZE && !p->invalid) { + *buf = taosMemoryCalloc(1, total); + memcpy(*buf, p->buf, total); + transResetBuffer(connBuf); + } else { + total = -1; + } return total; } @@ -173,6 +180,7 @@ bool transReadComplete(SConnBuffer* connBuf) { memcpy((char*)&head, connBuf->buf, sizeof(head)); int32_t msgLen = (int32_t)htonl(head.msgLen); p->total = msgLen; + p->invalid = TRANS_NOVALID_PACKET(htonl(head.magicNum)); } if (p->total >= p->len) { p->left = p->total - p->len; @@ -180,7 +188,7 @@ bool transReadComplete(SConnBuffer* connBuf) { p->left = 0; } } - return p->left == 0 ? true : false; + return (p->left == 0 || p->invalid) ? true : false; } int transSetConnOption(uv_tcp_t* stream) { diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index 4d35e346b17d81e108d33e5c2f1e6da376047f5e..46046b2a95f5600ec96b1d69904c2f83e87851d7 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -114,6 +114,8 @@ static void uvAcceptAsyncCb(uv_async_t* handle); static void uvShutDownCb(uv_shutdown_t* req, int status); static void uvPrepareCb(uv_prepare_t* handle); +static bool uvRecvReleaseReq(SSvrConn* conn, STransMsgHead* pHead); + /* * time-consuming task throwed into BG work thread */ @@ -123,7 +125,7 @@ static void uvWorkAfterTask(uv_work_t* req, int status); static void uvWalkCb(uv_handle_t* handle, void* arg); static void uvFreeCb(uv_handle_t* handle); -static void uvStartSendRespInternal(SSvrMsg* smsg); +static void uvStartSendRespImpl(SSvrMsg* smsg); static void uvPrepareSendData(SSvrMsg* msg, uv_buf_t* wb); static void uvStartSendResp(SSvrMsg* msg); @@ -154,37 +156,6 @@ static void* transAcceptThread(void* arg); static bool addHandleToWorkloop(SWorkThrd* pThrd, char* pipeName); static bool addHandleToAcceptloop(void* arg); -#define CONN_SHOULD_RELEASE(conn, head) \ - do { \ - if ((head)->release == 1 && (head->msgLen) == sizeof(*head)) { \ - reallocConnRef(conn); \ - tTrace("conn %p received release request", conn); \ - \ - STraceId traceId = head->traceId; \ - conn->status = ConnRelease; \ - transClearBuffer(&conn->readBuf); \ - transFreeMsg(transContFromHead((char*)head)); \ - \ - STransMsg tmsg = { \ - .code = 0, .info.handle = (void*)conn, .info.traceId = traceId, .info.ahandle = (void*)0x9527}; \ - SSvrMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSvrMsg)); \ - srvMsg->msg = tmsg; \ - srvMsg->type = Release; \ - srvMsg->pConn = conn; \ - if (!transQueuePush(&conn->srvMsgs, srvMsg)) { \ - return; \ - } \ - if (conn->regArg.init) { \ - tTrace("conn %p release, notify server app", conn); \ - STrans* pTransInst = conn->pTransInst; \ - (*pTransInst->cfp)(pTransInst->parent, &(conn->regArg.msg), NULL); \ - memset(&conn->regArg, 0, sizeof(conn->regArg)); \ - } \ - uvStartSendRespInternal(srvMsg); \ - return; \ - } \ - } while (0) - #define SRV_RELEASE_UV(loop) \ do { \ uv_walk(loop, uvWalkCb, NULL); \ @@ -212,17 +183,25 @@ static void uvHandleActivityTimeout(uv_timer_t* handle) { tDebug("%p timeout since no activity", conn); } -static void uvHandleReq(SSvrConn* pConn) { - STransMsgHead* msg = NULL; - int msgLen = 0; +static bool uvHandleReq(SSvrConn* pConn) { + STrans* pTransInst = pConn->pTransInst; - msgLen = transDumpFromBuffer(&pConn->readBuf, (char**)&msg); + STransMsgHead* msg = NULL; + int msgLen = transDumpFromBuffer(&pConn->readBuf, (char**)&msg); + if (msgLen <= 0) { + tError("%s conn %p read invalid packet", transLabel(pTransInst), pConn); + return false; + } STransMsgHead* pHead = (STransMsgHead*)msg; pHead->code = htonl(pHead->code); pHead->msgLen = htonl(pHead->msgLen); memcpy(pConn->user, pHead->user, strlen(pHead->user)); + if (uvRecvReleaseReq(pConn, pHead)) { + return true; + } + // TODO(dengyihao): time-consuming task throwed into BG Thread // uv_work_t* wreq = taosMemoryMalloc(sizeof(uv_work_t)); // wreq->data = pConn; @@ -230,8 +209,6 @@ static void uvHandleReq(SSvrConn* pConn) { // transRefSrvHandle(pConn); // uv_queue_work(((SWorkThrd*)pConn->hostThrd)->loop, wreq, uvWorkDoTask, uvWorkAfterTask); - CONN_SHOULD_RELEASE(pConn, pHead); - STransMsg transMsg; memset(&transMsg, 0, sizeof(transMsg)); transMsg.contLen = transContLenFromMsg(pHead->msgLen); @@ -247,7 +224,6 @@ static void uvHandleReq(SSvrConn* pConn) { tDebug("conn %p acquired by server app", pConn); } } - STrans* pTransInst = pConn->pTransInst; STraceId* trace = &pHead->traceId; if (pConn->status == ConnNormal && pHead->noResp == 0) { transRefSrvHandle(pConn); @@ -285,21 +261,34 @@ static void uvHandleReq(SSvrConn* pConn) { transReleaseExHandle(transGetRefMgt(), pConn->refId); (*pTransInst->cfp)(pTransInst->parent, &transMsg, NULL); + return true; } void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) { - // opt - SSvrConn* conn = cli->data; + SSvrConn* conn = cli->data; + STrans* pTransInst = conn->pTransInst; + SConnBuffer* pBuf = &conn->readBuf; - STrans* pTransInst = conn->pTransInst; if (nread > 0) { pBuf->len += nread; tTrace("%s conn %p total read:%d, current read:%d", transLabel(pTransInst), conn, pBuf->len, (int)nread); - while (transReadComplete(pBuf)) { - tTrace("%s conn %p alread read complete packet", transLabel(pTransInst), conn); - uvHandleReq(conn); + if (pBuf->len <= TRANS_PACKET_LIMIT) { + while (transReadComplete(pBuf)) { + tTrace("%s conn %p alread read complete packet", transLabel(pTransInst), conn); + if (true == pBuf->invalid || false == uvHandleReq(conn)) { + tError("%s conn %p read invalid packet, received from %s, local info:%s", transLabel(pTransInst), conn, + conn->dst, conn->src); + destroyConn(conn, true); + return; + } + } + return; + } else { + tError("%s conn %p read invalid packet, exceed limit, received from %s, local info:", transLabel(pTransInst), + conn, conn->dst, conn->src); + destroyConn(conn, true); + return; } - return; } if (nread == 0) { return; @@ -356,10 +345,10 @@ void uvOnSendCb(uv_write_t* req, int status) { msg = (SSvrMsg*)transQueueGet(&conn->srvMsgs, 0); if (msg != NULL) { - uvStartSendRespInternal(msg); + uvStartSendRespImpl(msg); } } else { - uvStartSendRespInternal(msg); + uvStartSendRespImpl(msg); } } } @@ -391,6 +380,7 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) { pHead->ahandle = (uint64_t)pMsg->info.ahandle; pHead->traceId = pMsg->info.traceId; pHead->hasEpSet = pMsg->info.hasEpSet; + pHead->magicNum = htonl(TRANS_MAGIC_NUM); if (pConn->status == ConnNormal) { pHead->msgType = (0 == pMsg->msgType ? pConn->inType + 1 : pMsg->msgType); @@ -423,7 +413,7 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) { wb->len = len; } -static void uvStartSendRespInternal(SSvrMsg* smsg) { +static void uvStartSendRespImpl(SSvrMsg* smsg) { SSvrConn* pConn = smsg->pConn; if (pConn->broken) { return; @@ -453,7 +443,7 @@ static void uvStartSendResp(SSvrMsg* smsg) { if (!transQueuePush(&pConn->srvMsgs, smsg)) { return; } - uvStartSendRespInternal(smsg); + uvStartSendRespImpl(smsg); return; } @@ -502,7 +492,6 @@ void uvWorkerAsyncCb(uv_async_t* handle) { // release handle to rpc init if (msg->type == Quit) { (*transAsyncHandle[msg->type])(msg, pThrd); - continue; } else { STransMsg transMsg = msg->msg; @@ -544,6 +533,35 @@ static void uvShutDownCb(uv_shutdown_t* req, int status) { uv_close((uv_handle_t*)req->handle, uvDestroyConn); taosMemoryFree(req); } +static bool uvRecvReleaseReq(SSvrConn* pConn, STransMsgHead* pHead) { + if ((pHead)->release == 1 && (pHead->msgLen) == sizeof(*pHead)) { + reallocConnRef(pConn); + tTrace("conn %p received release request", pConn); + + STraceId traceId = pHead->traceId; + pConn->status = ConnRelease; + transClearBuffer(&pConn->readBuf); + transFreeMsg(transContFromHead((char*)pHead)); + + STransMsg tmsg = {.code = 0, .info.handle = (void*)pConn, .info.traceId = traceId, .info.ahandle = (void*)0x9527}; + SSvrMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSvrMsg)); + srvMsg->msg = tmsg; + srvMsg->type = Release; + srvMsg->pConn = pConn; + if (!transQueuePush(&pConn->srvMsgs, srvMsg)) { + return true; + } + if (pConn->regArg.init) { + tTrace("conn %p release, notify server app", pConn); + STrans* pTransInst = pConn->pTransInst; + (*pTransInst->cfp)(pTransInst->parent, &(pConn->regArg.msg), NULL); + memset(&pConn->regArg, 0, sizeof(pConn->regArg)); + } + uvStartSendRespImpl(srvMsg); + return true; + } + return false; +} static void uvPrepareCb(uv_prepare_t* handle) { // prepare callback SWorkThrd* pThrd = handle->data; @@ -632,7 +650,7 @@ void uvOnAcceptCb(uv_stream_t* stream, int status) { pObj->workerIdx = (pObj->workerIdx + 1) % pObj->numOfThreads; - tTrace("new conntion accepted by main server, dispatch to %dth worker-thread", pObj->workerIdx); + tTrace("new connection accepted by main server, dispatch to %dth worker-thread", pObj->workerIdx); uv_write2(wr, (uv_stream_t*)&(pObj->pipe[pObj->workerIdx][0]), &buf, 1, (uv_stream_t*)cli, uvOnPipeWriteCb); } else { @@ -696,7 +714,7 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) { transUnrefSrvHandle(pConn); return; } - transGetSockDebugInfo(&peername, pConn->dst); + transSockInfo2Str(&peername, pConn->dst); addrlen = sizeof(sockname); if (0 != uv_tcp_getsockname(pConn->pTcp, (struct sockaddr*)&sockname, &addrlen)) { @@ -704,7 +722,7 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) { transUnrefSrvHandle(pConn); return; } - transGetSockDebugInfo(&sockname, pConn->src); + transSockInfo2Str(&sockname, pConn->src); struct sockaddr_in addr = *(struct sockaddr_in*)&sockname; pConn->clientIp = addr.sin_addr.s_addr; @@ -752,7 +770,7 @@ static bool addHandleToWorkloop(SWorkThrd* pThrd, char* pipeName) { // conn set QUEUE_INIT(&pThrd->conn); - pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 1, pThrd, uvWorkerAsyncCb); + pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 5, pThrd, uvWorkerAsyncCb); uv_pipe_connect(&pThrd->connect_req, pThrd->pipe, pipeName, uvOnPipeConnectionCb); // uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb); return true; @@ -857,6 +875,7 @@ static int reallocConnRef(SSvrConn* conn) { } static void uvDestroyConn(uv_handle_t* handle) { SSvrConn* conn = handle->data; + if (conn == NULL) { return; } @@ -872,9 +891,8 @@ static void uvDestroyConn(uv_handle_t* handle) { SSvrMsg* msg = transQueueGet(&conn->srvMsgs, i); destroySmsg(msg); } - - transReqQueueClear(&conn->wreqQueue); transQueueDestroy(&conn->srvMsgs); + transReqQueueClear(&conn->wreqQueue); QUEUE_REMOVE(&conn->queue); taosMemoryFree(conn->pTcp); @@ -992,7 +1010,7 @@ void uvHandleRelease(SSvrMsg* msg, SWorkThrd* thrd) { if (!transQueuePush(&conn->srvMsgs, msg)) { return; } - uvStartSendRespInternal(msg); + uvStartSendRespImpl(msg); return; } else if (conn->status == ConnRelease || conn->status == ConnNormal) { tDebug("%s conn %p already released, ignore release-msg", transLabel(thrd->pTransInst), conn); diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index a8da6809100fa5789e5b7e57e051631257782e1e..93ced912f8e2358c2aab6f04957ce060cf61c924 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -121,7 +121,7 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal) { if (found == NULL) { // file corrupted, no complete log // TODO delete and search in previous files - ASSERT(0); + /*ASSERT(0);*/ terrno = TSDB_CODE_WAL_FILE_CORRUPTED; return -1; } @@ -221,7 +221,6 @@ int walCheckAndRepairMeta(SWal* pWal) { int code = walSaveMeta(pWal); if (code < 0) { - taosArrayDestroy(actualLog); return -1; } } diff --git a/source/os/src/osDir.c b/source/os/src/osDir.c index b755a35815fb64d6fa11ff3e0c35efc647318b83..30aaa01dae0bf26bb930271f056d77226e808a4d 100644 --- a/source/os/src/osDir.c +++ b/source/os/src/osDir.c @@ -133,6 +133,7 @@ int32_t taosMulMkDir(const char *dirname) { code = mkdir(temp, 0755); #endif if (code < 0 && errno != EEXIST) { + terrno = TAOS_SYSTEM_ERROR(errno); return code; } *pos = TD_DIRSEP[0]; @@ -146,6 +147,7 @@ int32_t taosMulMkDir(const char *dirname) { code = mkdir(temp, 0755); #endif if (code < 0 && errno != EEXIST) { + terrno = TAOS_SYSTEM_ERROR(errno); return code; } } diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index 6c8e949b25a17047e58552f76fbbc6b3a5103669..fab933755a73ba23be962cb76b34da002b8a3702 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -203,10 +203,11 @@ int32_t taosRenameFile(const char *oldName, const char *newName) { } int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) { - struct stat fileStat; #ifdef WINDOWS - int32_t code = _stat(path, &fileStat); + struct _stati64 fileStat; + int32_t code = _stati64(path, &fileStat); #else + struct stat fileStat; int32_t code = stat(path, &fileStat); #endif if (code < 0) { @@ -312,6 +313,7 @@ TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) { assert(!(tdFileOptions & TD_FILE_EXCL)); fp = fopen(path, mode); if (fp == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); return NULL; } } else { @@ -334,6 +336,7 @@ TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) { fd = open(path, access, S_IRWXU | S_IRWXG | S_IRWXO); #endif if (fd == -1) { + terrno = TAOS_SYSTEM_ERROR(errno); return NULL; } } @@ -440,10 +443,10 @@ int64_t taosPReadFile(TdFilePtr pFile, void *buf, int64_t count, int64_t offset) #endif assert(pFile->fd >= 0); // Please check if you have closed the file. #ifdef WINDOWS - size_t pos = _lseek(pFile->fd, 0, SEEK_CUR); - _lseek(pFile->fd, offset, SEEK_SET); + size_t pos = _lseeki64(pFile->fd, 0, SEEK_CUR); + _lseeki64(pFile->fd, offset, SEEK_SET); int64_t ret = _read(pFile->fd, buf, count); - _lseek(pFile->fd, pos, SEEK_SET); + _lseeki64(pFile->fd, pos, SEEK_SET); #else int64_t ret = pread(pFile->fd, buf, count, offset); #endif @@ -493,7 +496,7 @@ int64_t taosLSeekFile(TdFilePtr pFile, int64_t offset, int32_t whence) { #endif assert(pFile->fd >= 0); // Please check if you have closed the file. #ifdef WINDOWS - int64_t ret = _lseek(pFile->fd, offset, whence); + int64_t ret = _lseeki64(pFile->fd, offset, whence); #else int64_t ret = lseek(pFile->fd, offset, whence); #endif @@ -637,7 +640,7 @@ int64_t taosFSendFile(TdFilePtr pFileOut, TdFilePtr pFileIn, int64_t *offset, in #ifdef WINDOWS - _lseek(pFileIn->fd, (int32_t)(*offset), 0); + _lseeki64(pFileIn->fd, *offset, 0); int64_t writeLen = 0; uint8_t buffer[_SEND_FILE_STEP_] = {0}; diff --git a/source/os/src/osRand.c b/source/os/src/osRand.c index 461a72e96297aa8f1b981d0b52c9fec6ce2e0916..bd2bfa486ec88c87f31c3e95e589ffad777d9be1 100644 --- a/source/os/src/osRand.c +++ b/source/os/src/osRand.c @@ -37,9 +37,13 @@ uint32_t taosRandR(uint32_t *pSeed) { uint32_t taosSafeRand(void) { #ifdef WINDOWS - uint32_t seed; + uint32_t seed = taosRand(); HCRYPTPROV hCryptProv; - if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, 0)) return seed; + if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, 0)) { + if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, CRYPT_NEWKEYSET)) { + return seed; + } + } if (hCryptProv != NULL) { if (!CryptGenRandom(hCryptProv, 4, &seed)) return seed; } diff --git a/source/os/src/osSemaphore.c b/source/os/src/osSemaphore.c index a7d2ba85311b8f2a9ababbde0f1f5857cb354484..8cc6f0ef2e2b436624cc961315e3ecff6db7691b 100644 --- a/source/os/src/osSemaphore.c +++ b/source/os/src/osSemaphore.c @@ -392,179 +392,32 @@ int32_t tsem_timewait(tsem_t* sem, int64_t nanosecs) { // *sem = NULL; // return 0; // } -typedef struct { - pthread_mutex_t count_lock; - pthread_cond_t count_bump; - unsigned int count; -} bosal_sem_t; int tsem_init(tsem_t *psem, int flags, unsigned int count) { - bosal_sem_t *pnewsem; - int result; - - pnewsem = (bosal_sem_t *)malloc(sizeof(bosal_sem_t)); - if (!pnewsem) { - return -1; - } - result = pthread_mutex_init(&pnewsem->count_lock, NULL); - if (result) { - free(pnewsem); - return result; - } - result = pthread_cond_init(&pnewsem->count_bump, NULL); - if (result) { - pthread_mutex_destroy(&pnewsem->count_lock); - free(pnewsem); - return result; - } - pnewsem->count = count; - *psem = (tsem_t)pnewsem; + *psem = dispatch_semaphore_create(count); + if (*psem == NULL) return -1; return 0; } int tsem_destroy(tsem_t *psem) { - bosal_sem_t *poldsem; - - if (!psem) { - return EINVAL; - } - poldsem = (bosal_sem_t *)*psem; - - pthread_mutex_destroy(&poldsem->count_lock); - pthread_cond_destroy(&poldsem->count_bump); - free(poldsem); return 0; } int tsem_post(tsem_t *psem) { - bosal_sem_t *pxsem; - int result, xresult; - - if (!psem) { - return EINVAL; - } - pxsem = (bosal_sem_t *)*psem; - - result = pthread_mutex_lock(&pxsem->count_lock); - if (result) { - return result; - } - pxsem->count = pxsem->count + 1; - - xresult = pthread_cond_signal(&pxsem->count_bump); - - result = pthread_mutex_unlock(&pxsem->count_lock); - if (result) { - return result; - } - if (xresult) { - errno = xresult; - return -1; - } - return 0; -} - -int tsem_trywait(tsem_t *psem) { - bosal_sem_t *pxsem; - int result, xresult; - - if (!psem) { - return EINVAL; - } - pxsem = (bosal_sem_t *)*psem; - - result = pthread_mutex_lock(&pxsem->count_lock); - if (result) { - return result; - } - xresult = 0; - - if (pxsem->count > 0) { - pxsem->count--; - } else { - xresult = EAGAIN; - } - result = pthread_mutex_unlock(&pxsem->count_lock); - if (result) { - return result; - } - if (xresult) { - errno = xresult; - return -1; - } + if (psem == NULL || *psem == NULL) return -1; + dispatch_semaphore_signal(*psem); return 0; } int tsem_wait(tsem_t *psem) { - bosal_sem_t *pxsem; - int result, xresult; - - if (!psem) { - return EINVAL; - } - pxsem = (bosal_sem_t *)*psem; - - result = pthread_mutex_lock(&pxsem->count_lock); - if (result) { - return result; - } - xresult = 0; - - if (pxsem->count == 0) { - xresult = pthread_cond_wait(&pxsem->count_bump, &pxsem->count_lock); - } - if (!xresult) { - if (pxsem->count > 0) { - pxsem->count--; - } - } - result = pthread_mutex_unlock(&pxsem->count_lock); - if (result) { - return result; - } - if (xresult) { - errno = xresult; - return -1; - } + if (psem == NULL || *psem == NULL) return -1; + dispatch_semaphore_wait(*psem, DISPATCH_TIME_FOREVER); return 0; } int tsem_timewait(tsem_t *psem, int64_t nanosecs) { - struct timespec abstim = { - .tv_sec = 0, - .tv_nsec = nanosecs, - }; - - bosal_sem_t *pxsem; - int result, xresult; - - if (!psem) { - return EINVAL; - } - pxsem = (bosal_sem_t *)*psem; - - result = pthread_mutex_lock(&pxsem->count_lock); - if (result) { - return result; - } - xresult = 0; - - if (pxsem->count == 0) { - xresult = pthread_cond_timedwait(&pxsem->count_bump, &pxsem->count_lock, &abstim); - } - if (!xresult) { - if (pxsem->count > 0) { - pxsem->count--; - } - } - result = pthread_mutex_unlock(&pxsem->count_lock); - if (result) { - return result; - } - if (xresult) { - errno = xresult; - return -1; - } + if (psem == NULL || *psem == NULL) return -1; + dispatch_semaphore_wait(*psem, nanosecs); return 0; } diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c index 3a75e18a7f8c1b60a86e8aac75f5c5f624176e5a..19e9568bbebf20a74e5f316bb50056efa4786c1a 100644 --- a/source/os/src/osSysinfo.c +++ b/source/os/src/osSysinfo.c @@ -595,6 +595,7 @@ int32_t taosGetDiskSize(char *dataDir, SDiskSize *diskSize) { #else struct statvfs info; if (statvfs(dataDir, &info)) { + terrno = TAOS_SYSTEM_ERROR(errno); return -1; } else { diskSize->total = info.f_blocks * info.f_frsize; @@ -851,13 +852,12 @@ char *taosGetCmdlineByPID(int pid) { } void taosSetCoreDump(bool enable) { + if (!enable) return; #ifdef WINDOWS - // SetUnhandledExceptionFilter(exceptionHandler); - // SetUnhandledExceptionFilter(&FlCrashDump); + SetUnhandledExceptionFilter(exceptionHandler); + SetUnhandledExceptionFilter(&FlCrashDump); #elif defined(_TD_DARWIN_64) #else - if (!enable) return; - // 1. set ulimit -c unlimited struct rlimit rlim; struct rlimit rlim_new; diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c index 3c4a0a20bdc4f43b14a4f6d476141cb7416db0e6..454739348eec9cd4fa59f5ec359890be2c09c816 100644 --- a/source/util/src/tarray.c +++ b/source/util/src/tarray.c @@ -386,6 +386,7 @@ void* taosArrayDestroy(SArray* pArray) { } void taosArrayDestroyP(SArray* pArray, FDelete fp) { + if(!pArray) return; for (int32_t i = 0; i < pArray->size; i++) { fp(*(void**)TARRAY_GET_ELEM(pArray, i)); } diff --git a/source/util/src/tcache.c b/source/util/src/tcache.c index dd61f7d22548a223bb33f01fb40b9dcd7423e5af..f9f42aa103d2b7ccbb95c60e5ae7e45c95e51699 100644 --- a/source/util/src/tcache.c +++ b/source/util/src/tcache.c @@ -702,7 +702,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { taosMsleep(50); } - uInfo("cache:%s will be cleaned up", pCacheObj->name); + uTrace("cache:%s will be cleaned up", pCacheObj->name); doCleanupDataCache(pCacheObj); } diff --git a/source/util/src/tcompare.c b/source/util/src/tcompare.c index fe3065b2b78a46a85d6dc04b90fcff4e0fe80f03..7032f397442464681557e00589d183011fe6b2d0 100644 --- a/source/util/src/tcompare.c +++ b/source/util/src/tcompare.c @@ -186,15 +186,16 @@ int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) { int32_t len1 = varDataLen(pLeft); int32_t len2 = varDataLen(pRight); - if (len1 != len2) { - return len1 > len2 ? 1 : -1; - } else { - int32_t ret = strncmp(varDataVal(pLeft), varDataVal(pRight), len1); - if (ret == 0) { + int32_t minLen = TMIN(len1, len2); + int32_t ret = strncmp(varDataVal(pLeft), varDataVal(pRight), minLen); + if (ret == 0) { + if (len1 == len2) { return 0; } else { - return ret > 0 ? 1 : -1; + return len1 > len2 ? 1 : -1; } + } else { + return ret > 0 ? 1 : -1; } } @@ -246,6 +247,756 @@ int32_t compareJsonVal(const void *pLeft, const void *pRight) { } } +int32_t compareInt8Int16(const void *pLeft, const void *pRight) { + int8_t left = GET_INT32_VAL(pLeft); + int16_t right = GET_INT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt8Int32(const void *pLeft, const void *pRight) { + int8_t left = GET_INT32_VAL(pLeft); + int32_t right = GET_INT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt8Int64(const void *pLeft, const void *pRight) { + int8_t left = GET_INT32_VAL(pLeft); + int64_t right = GET_INT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt8Float(const void *pLeft, const void *pRight) { + int8_t left = GET_INT32_VAL(pLeft); + float right = GET_FLOAT_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt8Double(const void *pLeft, const void *pRight) { + int8_t left = GET_INT32_VAL(pLeft); + double right = GET_DOUBLE_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt8Uint8(const void *pLeft, const void *pRight) { + int8_t left = GET_INT32_VAL(pLeft); + uint8_t right = GET_UINT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt8Uint16(const void *pLeft, const void *pRight) { + int8_t left = GET_INT32_VAL(pLeft); + uint16_t right = GET_UINT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt8Uint32(const void *pLeft, const void *pRight) { + int8_t left = GET_INT32_VAL(pLeft); + uint32_t right = GET_UINT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt8Uint64(const void *pLeft, const void *pRight) { + int8_t left = GET_INT32_VAL(pLeft); + uint64_t right = GET_UINT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt16Int8(const void *pLeft, const void *pRight) { + int16_t left = GET_INT32_VAL(pLeft); + int8_t right = GET_INT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt16Int32(const void *pLeft, const void *pRight) { + int16_t left = GET_INT32_VAL(pLeft); + int32_t right = GET_INT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt16Int64(const void *pLeft, const void *pRight) { + int16_t left = GET_INT32_VAL(pLeft); + int64_t right = GET_INT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt16Float(const void *pLeft, const void *pRight) { + int16_t left = GET_INT32_VAL(pLeft); + float right = GET_FLOAT_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt16Double(const void *pLeft, const void *pRight) { + int16_t left = GET_INT32_VAL(pLeft); + double right = GET_DOUBLE_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt16Uint8(const void *pLeft, const void *pRight) { + int16_t left = GET_INT32_VAL(pLeft); + uint8_t right = GET_UINT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt16Uint16(const void *pLeft, const void *pRight) { + int16_t left = GET_INT32_VAL(pLeft); + uint16_t right = GET_UINT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt16Uint32(const void *pLeft, const void *pRight) { + int16_t left = GET_INT32_VAL(pLeft); + uint32_t right = GET_UINT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt16Uint64(const void *pLeft, const void *pRight) { + int16_t left = GET_INT32_VAL(pLeft); + uint64_t right = GET_UINT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + + +int32_t compareInt32Int8(const void *pLeft, const void *pRight) { + int32_t left = GET_INT32_VAL(pLeft); + int8_t right = GET_INT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt32Int16(const void *pLeft, const void *pRight) { + int32_t left = GET_INT32_VAL(pLeft); + int16_t right = GET_INT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt32Int64(const void *pLeft, const void *pRight) { + int32_t left = GET_INT32_VAL(pLeft); + int64_t right = GET_INT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt32Float(const void *pLeft, const void *pRight) { + int32_t left = GET_INT32_VAL(pLeft); + float right = GET_FLOAT_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt32Double(const void *pLeft, const void *pRight) { + int32_t left = GET_INT32_VAL(pLeft); + double right = GET_DOUBLE_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt32Uint8(const void *pLeft, const void *pRight) { + int32_t left = GET_INT32_VAL(pLeft); + uint8_t right = GET_UINT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt32Uint16(const void *pLeft, const void *pRight) { + int32_t left = GET_INT32_VAL(pLeft); + uint16_t right = GET_UINT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt32Uint32(const void *pLeft, const void *pRight) { + int32_t left = GET_INT32_VAL(pLeft); + uint32_t right = GET_UINT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt32Uint64(const void *pLeft, const void *pRight) { + int32_t left = GET_INT32_VAL(pLeft); + uint64_t right = GET_UINT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt64Int8(const void *pLeft, const void *pRight) { + int64_t left = GET_INT64_VAL(pLeft); + int8_t right = GET_INT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt64Int16(const void *pLeft, const void *pRight) { + int64_t left = GET_INT64_VAL(pLeft); + int16_t right = GET_INT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt64Int32(const void *pLeft, const void *pRight) { + int64_t left = GET_INT64_VAL(pLeft); + int32_t right = GET_INT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt64Float(const void *pLeft, const void *pRight) { + int64_t left = GET_INT64_VAL(pLeft); + float right = GET_FLOAT_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt64Double(const void *pLeft, const void *pRight) { + int64_t left = GET_INT64_VAL(pLeft); + double right = GET_DOUBLE_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt64Uint8(const void *pLeft, const void *pRight) { + int64_t left = GET_INT64_VAL(pLeft); + uint8_t right = GET_UINT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt64Uint16(const void *pLeft, const void *pRight) { + int64_t left = GET_INT64_VAL(pLeft); + uint16_t right = GET_UINT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt64Uint32(const void *pLeft, const void *pRight) { + int64_t left = GET_INT64_VAL(pLeft); + uint32_t right = GET_UINT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt64Uint64(const void *pLeft, const void *pRight) { + int64_t left = GET_INT64_VAL(pLeft); + uint64_t right = GET_UINT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareFloatInt8(const void *pLeft, const void *pRight) { + float left = GET_FLOAT_VAL(pLeft); + int8_t right = GET_INT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareFloatInt16(const void *pLeft, const void *pRight) { + float left = GET_FLOAT_VAL(pLeft); + int16_t right = GET_INT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareFloatInt32(const void *pLeft, const void *pRight) { + float left = GET_FLOAT_VAL(pLeft); + int32_t right = GET_INT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareFloatInt64(const void *pLeft, const void *pRight) { + float left = GET_FLOAT_VAL(pLeft); + int64_t right = GET_INT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareFloatDouble(const void *pLeft, const void *pRight) { + float left = GET_FLOAT_VAL(pLeft); + double right = GET_DOUBLE_VAL(pRight); + + if (isnan(left) && isnan(right)) { + return 0; + } + + if (isnan(left)) { + return -1; + } + + if (isnan(right)) { + return 1; + } + + if (FLT_EQUAL(left, right)) { + return 0; + } + return FLT_GREATER(left, right) ? 1 : -1; +} + +int32_t compareFloatUint8(const void *pLeft, const void *pRight) { + float left = GET_FLOAT_VAL(pLeft); + uint8_t right = GET_UINT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareFloatUint16(const void *pLeft, const void *pRight) { + float left = GET_FLOAT_VAL(pLeft); + uint16_t right = GET_UINT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareFloatUint32(const void *pLeft, const void *pRight) { + float left = GET_FLOAT_VAL(pLeft); + uint32_t right = GET_UINT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareFloatUint64(const void *pLeft, const void *pRight) { + float left = GET_FLOAT_VAL(pLeft); + uint64_t right = GET_UINT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareDoubleInt8(const void *pLeft, const void *pRight) { + double left = GET_DOUBLE_VAL(pLeft); + int8_t right = GET_INT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareDoubleInt16(const void *pLeft, const void *pRight) { + double left = GET_DOUBLE_VAL(pLeft); + int16_t right = GET_INT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareDoubleInt32(const void *pLeft, const void *pRight) { + double left = GET_DOUBLE_VAL(pLeft); + int32_t right = GET_INT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareDoubleInt64(const void *pLeft, const void *pRight) { + double left = GET_DOUBLE_VAL(pLeft); + int64_t right = GET_INT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareDoubleFloat(const void *pLeft, const void *pRight) { + double left = GET_DOUBLE_VAL(pLeft); + float right = GET_FLOAT_VAL(pRight); + + if (isnan(left) && isnan(right)) { + return 0; + } + + if (isnan(left)) { + return -1; + } + + if (isnan(right)) { + return 1; + } + + if (FLT_EQUAL(left, right)) { + return 0; + } + return FLT_GREATER(left, right) ? 1 : -1; +} + +int32_t compareDoubleUint8(const void *pLeft, const void *pRight) { + double left = GET_DOUBLE_VAL(pLeft); + uint8_t right = GET_UINT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareDoubleUint16(const void *pLeft, const void *pRight) { + double left = GET_DOUBLE_VAL(pLeft); + uint16_t right = GET_UINT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareDoubleUint32(const void *pLeft, const void *pRight) { + double left = GET_DOUBLE_VAL(pLeft); + uint32_t right = GET_UINT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareDoubleUint64(const void *pLeft, const void *pRight) { + double left = GET_DOUBLE_VAL(pLeft); + uint64_t right = GET_UINT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint8Int8(const void *pLeft, const void *pRight) { + uint8_t left = GET_UINT8_VAL(pLeft); + int8_t right = GET_INT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint8Int16(const void *pLeft, const void *pRight) { + uint8_t left = GET_UINT8_VAL(pLeft); + int16_t right = GET_INT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint8Int32(const void *pLeft, const void *pRight) { + uint8_t left = GET_UINT8_VAL(pLeft); + int32_t right = GET_INT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint8Int64(const void *pLeft, const void *pRight) { + uint8_t left = GET_UINT8_VAL(pLeft); + int64_t right = GET_INT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint8Float(const void *pLeft, const void *pRight) { + uint8_t left = GET_UINT8_VAL(pLeft); + float right = GET_FLOAT_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint8Double(const void *pLeft, const void *pRight) { + uint8_t left = GET_UINT8_VAL(pLeft); + double right = GET_DOUBLE_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint8Uint16(const void *pLeft, const void *pRight) { + uint8_t left = GET_UINT8_VAL(pLeft); + uint16_t right = GET_UINT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint8Uint32(const void *pLeft, const void *pRight) { + uint8_t left = GET_UINT8_VAL(pLeft); + uint32_t right = GET_UINT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint8Uint64(const void *pLeft, const void *pRight) { + uint8_t left = GET_UINT8_VAL(pLeft); + uint64_t right = GET_UINT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint16Int8(const void *pLeft, const void *pRight) { + uint16_t left = GET_UINT16_VAL(pLeft); + int8_t right = GET_INT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint16Int16(const void *pLeft, const void *pRight) { + uint16_t left = GET_UINT16_VAL(pLeft); + int16_t right = GET_INT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint16Int32(const void *pLeft, const void *pRight) { + uint16_t left = GET_UINT16_VAL(pLeft); + int32_t right = GET_INT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint16Int64(const void *pLeft, const void *pRight) { + uint16_t left = GET_UINT16_VAL(pLeft); + int64_t right = GET_INT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint16Float(const void *pLeft, const void *pRight) { + uint16_t left = GET_UINT16_VAL(pLeft); + float right = GET_FLOAT_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint16Double(const void *pLeft, const void *pRight) { + uint16_t left = GET_UINT16_VAL(pLeft); + double right = GET_DOUBLE_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint16Uint8(const void *pLeft, const void *pRight) { + uint16_t left = GET_UINT16_VAL(pLeft); + uint8_t right = GET_UINT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint16Uint32(const void *pLeft, const void *pRight) { + uint16_t left = GET_UINT16_VAL(pLeft); + uint32_t right = GET_UINT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint16Uint64(const void *pLeft, const void *pRight) { + uint16_t left = GET_UINT16_VAL(pLeft); + uint64_t right = GET_UINT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint32Int8(const void *pLeft, const void *pRight) { + uint32_t left = GET_UINT32_VAL(pLeft); + int8_t right = GET_INT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint32Int16(const void *pLeft, const void *pRight) { + uint32_t left = GET_UINT32_VAL(pLeft); + int16_t right = GET_INT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint32Int32(const void *pLeft, const void *pRight) { + uint32_t left = GET_UINT32_VAL(pLeft); + int32_t right = GET_INT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint32Int64(const void *pLeft, const void *pRight) { + uint32_t left = GET_UINT32_VAL(pLeft); + int64_t right = GET_INT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint32Float(const void *pLeft, const void *pRight) { + uint32_t left = GET_UINT32_VAL(pLeft); + float right = GET_FLOAT_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint32Double(const void *pLeft, const void *pRight) { + uint32_t left = GET_UINT32_VAL(pLeft); + double right = GET_DOUBLE_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint32Uint8(const void *pLeft, const void *pRight) { + uint32_t left = GET_UINT32_VAL(pLeft); + uint8_t right = GET_UINT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint32Uint16(const void *pLeft, const void *pRight) { + uint32_t left = GET_UINT32_VAL(pLeft); + uint16_t right = GET_UINT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint32Uint64(const void *pLeft, const void *pRight) { + uint32_t left = GET_UINT32_VAL(pLeft); + uint64_t right = GET_UINT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint64Int8(const void *pLeft, const void *pRight) { + uint64_t left = GET_UINT64_VAL(pLeft); + int8_t right = GET_INT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint64Int16(const void *pLeft, const void *pRight) { + uint64_t left = GET_UINT64_VAL(pLeft); + int16_t right = GET_INT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint64Int32(const void *pLeft, const void *pRight) { + uint64_t left = GET_UINT64_VAL(pLeft); + int32_t right = GET_INT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint64Int64(const void *pLeft, const void *pRight) { + uint64_t left = GET_UINT64_VAL(pLeft); + int64_t right = GET_INT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint64Float(const void *pLeft, const void *pRight) { + uint64_t left = GET_UINT64_VAL(pLeft); + float right = GET_FLOAT_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint64Double(const void *pLeft, const void *pRight) { + uint64_t left = GET_UINT64_VAL(pLeft); + double right = GET_DOUBLE_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint64Uint8(const void *pLeft, const void *pRight) { + uint64_t left = GET_UINT64_VAL(pLeft); + uint8_t right = GET_UINT8_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint64Uint16(const void *pLeft, const void *pRight) { + uint64_t left = GET_UINT64_VAL(pLeft); + uint16_t right = GET_UINT16_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint64Uint32(const void *pLeft, const void *pRight) { + uint64_t left = GET_UINT64_VAL(pLeft); + uint32_t right = GET_UINT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + + int32_t compareJsonValDesc(const void *pLeft, const void *pRight) { return compareJsonVal(pRight, pLeft); } diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index e8f1f06ef10d6da3c2ae0c24b7626ccdc5b72aea..ba877915b13b6e522367637bd7713edc8feee0f3 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -83,8 +83,8 @@ int32_t tsCompressInit() { if (lossyFloat == false && lossyDouble == false) return 0; tdszInit(fPrecision, dPrecision, maxRange, curRange, Compressor); - if (lossyFloat) uInfo("lossy compression float is opened. "); - if (lossyDouble) uInfo("lossy compression double is opened. "); + if (lossyFloat) uTrace("lossy compression float is opened. "); + if (lossyDouble) uTrace("lossy compression double is opened. "); return 1; } // exit call diff --git a/source/util/src/tconfig.c b/source/util/src/tconfig.c index fdb397561dbbddba2015a2a7e149ec0a71b6be2d..2a28ec66d2f58225b7c4dde5766d3a1522ca1a3d 100644 --- a/source/util/src/tconfig.c +++ b/source/util/src/tconfig.c @@ -33,6 +33,8 @@ int32_t cfgLoadFromEnvCmd(SConfig *pConfig, const char **envCmd); int32_t cfgLoadFromApollUrl(SConfig *pConfig, const char *url); int32_t cfgSetItem(SConfig *pConfig, const char *name, const char *value, ECfgSrcType stype); +extern char **environ; + SConfig *cfgInit() { SConfig *pCfg = taosMemoryCalloc(1, sizeof(SConfig)); if (pCfg == NULL) { @@ -627,24 +629,17 @@ void cfgDumpCfg(SConfig *pCfg, bool tsc, bool dump) { } int32_t cfgLoadFromEnvVar(SConfig *pConfig) { - char *line = NULL, *name, *value, *value2, *value3; + char line[1024], *name, *value, *value2, *value3; int32_t olen, vlen, vlen2, vlen3; int32_t code = 0; - ssize_t _bytes = 0; - TdCmdPtr pCmd = taosOpenCmd("set"); - if (pCmd == NULL) { - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; - } - while (!taosEOFCmd(pCmd)) { + char **pEnv = environ; + line[1023] = 0; + while(*pEnv != NULL) { name = value = value2 = value3 = NULL; olen = vlen = vlen2 = vlen3 = 0; - _bytes = taosGetLineCmd(pCmd, &line); - if (_bytes < 0) { - break; - } - if(line[_bytes - 1] == '\n') line[_bytes - 1] = 0; + strncpy(line, *pEnv, sizeof(line)-1); + pEnv++; taosEnvToCfg(line, line); paGetToken(line, &name, &olen); @@ -671,9 +666,6 @@ int32_t cfgLoadFromEnvVar(SConfig *pConfig) { } } - taosCloseCmd(&pCmd); - if (line != NULL) taosMemoryFreeClear(line); - uInfo("load from env variables cfg success"); return 0; } @@ -1040,34 +1032,25 @@ int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char* apolloUrl index++; } - char *line = NULL; - ssize_t _bytes = 0; - TdCmdPtr pCmd = taosOpenCmd("set"); - if (pCmd != NULL) { - while (!taosEOFCmd(pCmd)) { - _bytes = taosGetLineCmd(pCmd, &line); - if (_bytes < 0) { - break; - } - if(line[_bytes - 1] == '\n') line[_bytes - 1] = 0; - if (strncmp(line, "TAOS_APOLLO_URL", 14) == 0) { - char *p = strchr(line, '='); - if (p != NULL) { + char line[1024]; + char **pEnv = environ; + line[1023] = 0; + while(*pEnv != NULL) { + strncpy(line, *pEnv, sizeof(line)-1); + pEnv++; + if (strncmp(line, "TAOS_APOLLO_URL", 14) == 0) { + char *p = strchr(line, '='); + if (p != NULL) { + p++; + if (*p == '\'') { p++; - if (*p == '\'') { - p++; - p[strlen(p)-1] = '\0'; - } - memcpy(apolloUrl, p, TMIN(strlen(p)+1,PATH_MAX)); - uInfo("get apollo url from env variables success, apolloUrl=%s",apolloUrl); - taosCloseCmd(&pCmd); - if (line != NULL) taosMemoryFreeClear(line); - return 0; + p[strlen(p)-1] = '\0'; } + memcpy(apolloUrl, p, TMIN(strlen(p)+1,PATH_MAX)); + uInfo("get apollo url from env variables success, apolloUrl=%s",apolloUrl); + return 0; } } - taosCloseCmd(&pCmd); - if (line != NULL) taosMemoryFreeClear(line); } const char *filepath = ".env"; @@ -1083,10 +1066,11 @@ int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char* apolloUrl return 0; } } + int64_t _bytes; TdFilePtr pFile = taosOpenFile(filepath, TD_FILE_READ | TD_FILE_STREAM); if (pFile != NULL) { while (!taosEOFFile(pFile)) { - _bytes = taosGetLineFile(pFile, &line); + _bytes = taosGetsFile(pFile, sizeof(line) - 1, line); if (_bytes <= 0) { break; } @@ -1101,14 +1085,12 @@ int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char* apolloUrl } memcpy(apolloUrl, p, TMIN(strlen(p)+1,PATH_MAX)); taosCloseFile(&pFile); - if (line != NULL) taosMemoryFreeClear(line); uInfo("get apollo url from env file success"); return 0; } } } taosCloseFile(&pFile); - if (line != NULL) taosMemoryFreeClear(line); } uInfo("fail get apollo url from cmd env file"); diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 6e3067d44edc67f10944cdde2ffc72fbd4b57fea..f99d1268924ff52e7f7d7beb97d5b2a1d28bd477 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -121,7 +121,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_CONN_KILLED, "Connection killed") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SQL_SYNTAX_ERROR, "Syntax error in SQL") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DB_NOT_SELECTED, "Database not specified or available") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TABLE_NAME, "Table does not exist") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too long, check maxSQLLength config") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too long") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_FILE_EMPTY, "File is empty") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_LINE_SYNTAX_ERROR, "Syntax error in Line") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_META_CACHED, "No table meta cached") @@ -293,6 +293,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_CGROUP_USED, "Consumer group being TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_ALREADY_EXIST, "Stream already exists") TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_NOT_EXIST, "Stream not exist") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STREAM_OPTION, "Invalid stream option") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_MUST_BE_DELETED, "Stream must be dropped first") // mnode-sma TAOS_DEFINE_ERROR(TSDB_CODE_MND_SMA_ALREADY_EXIST, "SMA already exists") @@ -616,6 +617,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FILE_CORRUPTED, "Rsma file corrupted TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_REMOVE_EXISTS, "Rsma remove exists") TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP, "Rsma fetch msg is messed up") TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_EMPTY_INFO, "Rsma info is empty") +TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_INVALID_SCHEMA, "Rsma invalid schema") //index TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Index is rebuilding") @@ -624,6 +626,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Invalid index file" //tmq TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message") TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_MISMATCH, "Consumer mismatch") +TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_CLOSED, "Consumer closed") #ifdef TAOS_ERROR_C }; diff --git a/source/util/src/thash.c b/source/util/src/thash.c index aee84a0d55336c63840d1a5df887da7752592841..b69d8ea52866055668ce4937836c5eb46842f1c2 100644 --- a/source/util/src/thash.c +++ b/source/util/src/thash.c @@ -21,7 +21,7 @@ // the add ref count operation may trigger the warning if the reference count is greater than the MAX_WARNING_REF_COUNT #define MAX_WARNING_REF_COUNT 10000 -#define HASH_MAX_CAPACITY (1024 * 1024 * 16) +#define HASH_MAX_CAPACITY (1024 * 1024 * 1024) #define HASH_DEFAULT_LOAD_FACTOR (0.75) #define HASH_INDEX(v, c) ((v) & ((c)-1)) @@ -67,6 +67,7 @@ struct SHashObj { bool enableUpdate; // enable update SArray *pMemBlock; // memory block allocated for SHashEntry _hash_before_fn_t callbackFp; // function invoked before return the value to caller + int64_t compTimes; }; /* @@ -146,6 +147,7 @@ static FORCE_INLINE SHashNode *doSearchInEntryList(SHashObj *pHashObj, SHashEntr uint32_t hashVal) { SHashNode *pNode = pe->next; while (pNode) { + atomic_add_fetch_64(&pHashObj->compTimes, 1); if ((pNode->keyLen == keyLen) && ((*(pHashObj->equalFp))(GET_HASH_NODE_KEY(pNode), key, keyLen) == 0) && pNode->removed == 0) { assert(pNode->hashVal == hashVal); @@ -250,11 +252,15 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTyp // the max slots is not defined by user pHashObj->capacity = taosHashCapacity((int32_t)capacity); + pHashObj->size = 0; pHashObj->equalFp = memcmp; pHashObj->hashFp = fn; pHashObj->type = type; + pHashObj->lock = 0; pHashObj->enableUpdate = update; + pHashObj->freeFp = NULL; + pHashObj->callbackFp = NULL; ASSERT((pHashObj->capacity & (pHashObj->capacity - 1)) == 0); @@ -327,7 +333,7 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, const vo // disable resize taosHashRLock(pHashObj); - int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); + uint32_t slot = HASH_INDEX(hashVal, pHashObj->capacity); SHashEntry *pe = pHashObj->hashList[slot]; taosHashEntryWLock(pHashObj, pe); @@ -882,3 +888,7 @@ void *taosHashAcquire(SHashObj *pHashObj, const void *key, size_t keyLen) { } void taosHashRelease(SHashObj *pHashObj, void *p) { taosHashCancelIterate(pHashObj, p); } + +int64_t taosHashGetCompTimes(SHashObj *pHashObj) { return atomic_load_64(&pHashObj->compTimes); } + + diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index 2e8239c68f0861486d2d6175d698dc76ed92b128..06ebbf27fb1dc2e32d6b9ce26b735df827de8f96 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -97,7 +97,7 @@ int32_t tqDebugFlag = 135; int32_t fsDebugFlag = 135; int32_t metaDebugFlag = 135; int32_t udfDebugFlag = 135; -int32_t smaDebugFlag = 135; +int32_t smaDebugFlag = 131; int32_t idxDebugFlag = 135; int64_t dbgEmptyW = 0; @@ -429,7 +429,7 @@ static inline int32_t taosBuildLogHead(char *buffer, const char *flags) { } static inline void taosPrintLogImp(ELogLevel level, int32_t dflag, const char *buffer, int32_t len) { - if ((dflag & DEBUG_FILE) && tsLogObj.logHandle && tsLogObj.logHandle->pFile != NULL) { + if ((dflag & DEBUG_FILE) && tsLogObj.logHandle && tsLogObj.logHandle->pFile != NULL && osLogSpaceAvailable()) { taosUpdateLogNums(level); if (tsAsyncLog) { taosPushLogBuffer(tsLogObj.logHandle, buffer, len); @@ -451,7 +451,6 @@ static inline void taosPrintLogImp(ELogLevel level, int32_t dflag, const char *b } void taosPrintLog(const char *flags, ELogLevel level, int32_t dflag, const char *format, ...) { - if (!osLogSpaceAvailable()) return; if (!(dflag & DEBUG_FILE) && !(dflag & DEBUG_SCREEN)) return; char buffer[LOG_MAX_LINE_BUFFER_SIZE]; diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c index 0e608d0da22da836f0a357c7bd4f9b194c11fd13..2767fed9373aa47ebdbea39b07f28c238db14c7d 100644 --- a/source/util/src/tpagedbuf.c +++ b/source/util/src/tpagedbuf.c @@ -33,7 +33,7 @@ struct SDiskbasedBuf { int32_t pageSize; // current used page size int32_t inMemPages; // numOfPages that are allocated in memory SList* freePgList; // free page list - SHashObj* groupSet; // id hash table, todo remove it + SArray* pIdList; // page id list SHashObj* all; SList* lruList; void* emptyDummyIdList; // dummy id list @@ -241,26 +241,7 @@ static int32_t loadPageFromDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) { return 0; } -static SIDList addNewGroup(SDiskbasedBuf* pBuf, int32_t groupId) { - assert(taosHashGet(pBuf->groupSet, (const char*)&groupId, sizeof(int32_t)) == NULL); - - SArray* pa = taosArrayInit(1, POINTER_BYTES); - int32_t ret = taosHashPut(pBuf->groupSet, (const char*)&groupId, sizeof(int32_t), &pa, POINTER_BYTES); - assert(ret == 0); - - return pa; -} - -static SPageInfo* registerPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t pageId) { - SIDList list = NULL; - - char** p = taosHashGet(pBuf->groupSet, (const char*)&groupId, sizeof(int32_t)); - if (p == NULL) { // it is a new group id - list = addNewGroup(pBuf, groupId); - } else { - list = (SIDList)(*p); - } - +static SPageInfo* registerPage(SDiskbasedBuf* pBuf, int32_t pageId) { pBuf->numOfPages += 1; SPageInfo* ppi = taosMemoryMalloc(sizeof(SPageInfo)); @@ -273,7 +254,7 @@ static SPageInfo* registerPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t pag ppi->pn = NULL; ppi->dirty = false; - return *(SPageInfo**)taosArrayPush(list, &ppi); + return *(SPageInfo**)taosArrayPush(pBuf->pIdList, &ppi); } static SListNode* getEldestUnrefedPage(SDiskbasedBuf* pBuf) { @@ -293,22 +274,13 @@ static SListNode* getEldestUnrefedPage(SDiskbasedBuf* pBuf) { } } - // int32_t pos = listNEles(pBuf->lruList); - // SListIter iter1 = {0}; - // tdListInitIter(pBuf->lruList, &iter1, TD_LIST_BACKWARD); - // SListNode* pn1 = NULL; - // while((pn1 = tdListNext(&iter1)) != NULL) { - // SPageInfo* pageInfo = *(SPageInfo**) pn1->data; - // printf("page %d is used, dirty:%d, pos:%d\n", pageInfo->pageId, pageInfo->dirty, pos - 1); - // pos -= 1; - // } - return pn; } static char* evacOneDataPage(SDiskbasedBuf* pBuf) { char* bufPage = NULL; SListNode* pn = getEldestUnrefedPage(pBuf); + terrno = 0; // all pages are referenced by user, try to allocate new space if (pn == NULL) { @@ -332,6 +304,7 @@ static char* evacOneDataPage(SDiskbasedBuf* pBuf) { bufPage = flushPageToDisk(pBuf, d); } + ASSERT((bufPage != NULL) || terrno != TSDB_CODE_SUCCESS); return bufPage; } @@ -380,7 +353,8 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem // init id hash table _hash_fn_t fn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT); - pPBuf->groupSet = taosHashInit(10, fn, true, false); + pPBuf->pIdList = taosArrayInit(4, POINTER_BYTES); + pPBuf->assistBuf = taosMemoryMalloc(pPBuf->pageSize + 2); // EXTRA BYTES pPBuf->all = taosHashInit(10, fn, true, false); @@ -397,7 +371,7 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem return TSDB_CODE_SUCCESS; } -void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId) { +void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) { pBuf->statis.getPages += 1; char* availablePage = NULL; @@ -423,7 +397,7 @@ void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId) { *pageId = (++pBuf->allocateId); // register page id info - pi = registerPage(pBuf, groupId, *pageId); + pi = registerPage(pBuf, *pageId); // add to hash map taosHashPut(pBuf->all, pageId, sizeof(int32_t), &pi, POINTER_BYTES); @@ -524,19 +498,11 @@ void releaseBufPageInfo(SDiskbasedBuf* pBuf, SPageInfo* pi) { pBuf->statis.releasePages += 1; } -size_t getNumOfBufGroupId(const SDiskbasedBuf* pBuf) { return taosHashGetSize(pBuf->groupSet); } - size_t getTotalBufSize(const SDiskbasedBuf* pBuf) { return (size_t)pBuf->totalBufSize; } -SIDList getDataBufPagesIdList(SDiskbasedBuf* pBuf, int32_t groupId) { - assert(pBuf != NULL); - - char** p = taosHashGet(pBuf->groupSet, (const char*)&groupId, sizeof(int32_t)); - if (p == NULL) { // it is a new group id - return pBuf->emptyDummyIdList; - } else { - return (SArray*)(*p); - } +SIDList getDataBufPagesIdList(SDiskbasedBuf* pBuf) { + ASSERT(pBuf != NULL); + return pBuf->pIdList; } void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) { @@ -576,26 +542,21 @@ void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) { taosRemoveFile(pBuf->path); taosMemoryFreeClear(pBuf->path); - SArray** p = taosHashIterate(pBuf->groupSet, NULL); - while (p) { - size_t n = taosArrayGetSize(*p); - for (int32_t i = 0; i < n; ++i) { - SPageInfo* pi = taosArrayGetP(*p, i); - taosMemoryFreeClear(pi->pData); - taosMemoryFreeClear(pi); - } - - taosArrayDestroy(*p); - p = taosHashIterate(pBuf->groupSet, p); + size_t n = taosArrayGetSize(pBuf->pIdList); + for (int32_t i = 0; i < n; ++i) { + SPageInfo* pi = taosArrayGetP(pBuf->pIdList, i); + taosMemoryFreeClear(pi->pData); + taosMemoryFreeClear(pi); } + taosArrayDestroy(pBuf->pIdList); + tdListFree(pBuf->lruList); tdListFree(pBuf->freePgList); taosArrayDestroy(pBuf->emptyDummyIdList); taosArrayDestroy(pBuf->pFree); - taosHashCleanup(pBuf->groupSet); taosHashCleanup(pBuf->all); taosMemoryFreeClear(pBuf->id); @@ -659,32 +620,32 @@ void dBufPrintStatis(const SDiskbasedBuf* pBuf) { pBuf->totalBufSize / 1024.0, pBuf->numOfPages, listNEles(pBuf->lruList) * pBuf->pageSize / 1024.0, listNEles(pBuf->lruList), pBuf->fileSize / 1024.0, pBuf->pageSize / 1024.0f, pBuf->id); - printf( - "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f Kb\n", - ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f, ps->loadPages, - ps->loadBytes / (1024.0 * ps->loadPages)); + if (ps->loadPages > 0) { + printf( + "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f Kb\n", + ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f, + ps->loadPages, ps->loadBytes / (1024.0 * ps->loadPages)); + } else { + printf("no page loaded\n"); + } } void clearDiskbasedBuf(SDiskbasedBuf* pBuf) { - SArray** p = taosHashIterate(pBuf->groupSet, NULL); - while (p) { - size_t n = taosArrayGetSize(*p); - for (int32_t i = 0; i < n; ++i) { - SPageInfo* pi = taosArrayGetP(*p, i); - taosMemoryFreeClear(pi->pData); - taosMemoryFreeClear(pi); - } - taosArrayDestroy(*p); - p = taosHashIterate(pBuf->groupSet, p); + size_t n = taosArrayGetSize(pBuf->pIdList); + for (int32_t i = 0; i < n; ++i) { + SPageInfo* pi = taosArrayGetP(pBuf->pIdList, i); + taosMemoryFreeClear(pi->pData); + taosMemoryFreeClear(pi); } + taosArrayClear(pBuf->pIdList); + tdListEmpty(pBuf->lruList); tdListEmpty(pBuf->freePgList); taosArrayClear(pBuf->emptyDummyIdList); taosArrayClear(pBuf->pFree); - taosHashClear(pBuf->groupSet); taosHashClear(pBuf->all); pBuf->numOfPages = 0; // all pages are in buffer in the first place diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c index 50beba8a9b67e191d402e580a2964f954c454869..eb70002680cd8df2849ffa4fcb6c7c27ddf330d4 100644 --- a/source/util/src/tqueue.c +++ b/source/util/src/tqueue.c @@ -298,7 +298,8 @@ int32_t taosGetQitem(STaosQall *qall, void **ppItem) { return num; } -void taosResetQitems(STaosQall *qall) { qall->current = qall->start; } +void taosResetQitems(STaosQall *qall) { qall->current = qall->start; } +int32_t taosQallItemSize(STaosQall *qall) { return qall->numOfItems; } STaosQset *taosOpenQset() { STaosQset *qset = taosMemoryCalloc(sizeof(STaosQset), 1); diff --git a/source/util/src/trbtree.c b/source/util/src/trbtree.c new file mode 100644 index 0000000000000000000000000000000000000000..0970485dade90bb8719a2fa39facb047e07bcfff --- /dev/null +++ b/source/util/src/trbtree.c @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "os.h" + +typedef int32_t (*tRBTreeCmprFn)(void *, void *); + +typedef struct SRBTree SRBTree; +typedef struct SRBTreeNode SRBTreeNode; +typedef struct SRBTreeIter SRBTreeIter; + +struct SRBTreeNode { + enum { RED, BLACK } color; + SRBTreeNode *parent; + SRBTreeNode *left; + SRBTreeNode *right; + uint8_t payload[]; +}; + +struct SRBTree { + tRBTreeCmprFn cmprFn; + SRBTreeNode *root; +}; + +struct SRBTreeIter { + SRBTree *pTree; +}; + +#define RBTREE_NODE_COLOR(N) ((N) ? (N)->color : BLACK) + +// APIs ================================================ +static void tRBTreeRotateLeft(SRBTree *pTree, SRBTreeNode *pNode) { + SRBTreeNode *right = pNode->right; + + pNode->right = right->left; + if (pNode->right) { + pNode->right->parent = pNode; + } + + right->parent = pNode->parent; + if (pNode->parent == NULL) { + pTree->root = right; + } else if (pNode == pNode->parent->left) { + pNode->parent->left = right; + } else { + pNode->parent->right = right; + } + + right->left = pNode; + pNode->parent = right; +} + +static void tRBTreeRotateRight(SRBTree *pTree, SRBTreeNode *pNode) { + SRBTreeNode *left = pNode->left; + + pNode->left = left->right; + if (pNode->left) { + pNode->left->parent = pNode; + } + + left->parent = pNode->parent; + if (pNode->parent == NULL) { + pTree->root = left; + } else if (pNode == pNode->parent->left) { + pNode->parent->left = left; + } else { + pNode->parent->right = left; + } + + left->right = pNode; + pNode->parent = left; +} + +#define tRBTreeCreate(compare) \ + (SRBTree) { .cmprFn = (compare), .root = NULL } + +SRBTreeNode *tRBTreePut(SRBTree *pTree, SRBTreeNode *pNew) { + pNew->left = NULL; + pNew->right = NULL; + pNew->color = RED; + + // insert + if (pTree->root == NULL) { + pNew->parent = NULL; + pTree->root = pNew; + } else { + SRBTreeNode *pNode = pTree->root; + while (true) { + ASSERT(pNode); + + int32_t c = pTree->cmprFn(pNew->payload, pNode->payload); + if (c < 0) { + if (pNode->left) { + pNode = pNode->left; + } else { + pNew->parent = pNode; + pNode->left = pNew; + break; + } + } else if (c > 0) { + if (pNode->right) { + pNode = pNode->right; + } else { + pNew->parent = pNode; + pNode->right = pNew; + break; + } + } else { + return NULL; + } + } + } + + // fix + SRBTreeNode *pNode = pNew; + while (pNode->parent && pNode->parent->color == RED) { + SRBTreeNode *p = pNode->parent; + SRBTreeNode *g = p->parent; + + if (p == g->left) { + SRBTreeNode *u = g->right; + + if (RBTREE_NODE_COLOR(u) == RED) { + p->color = BLACK; + u->color = BLACK; + g->color = RED; + pNode = g; + } else { + if (pNode == p->right) { + pNode = p; + tRBTreeRotateLeft(pTree, pNode); + } + pNode->parent->color = BLACK; + pNode->parent->parent->color = RED; + tRBTreeRotateRight(pTree, pNode->parent->parent); + } + } else { + SRBTreeNode *u = g->left; + + if (RBTREE_NODE_COLOR(u) == RED) { + p->color = BLACK; + u->color = BLACK; + g->color = RED; + } else { + if (pNode == p->left) { + pNode = p; + tRBTreeRotateRight(pTree, pNode); + } + pNode->parent->color = BLACK; + pNode->parent->parent->color = RED; + tRBTreeRotateLeft(pTree, pNode->parent->parent); + } + } + } + + pTree->root->color = BLACK; + return pNew; +} + +SRBTreeNode *tRBTreeDrop(SRBTree *pTree, void *pKey) { + SRBTreeNode *pNode = pTree->root; + + while (pNode) { + int32_t c = pTree->cmprFn(pKey, pNode->payload); + + if (c < 0) { + pNode = pNode->left; + } else if (c > 0) { + pNode = pNode->right; + } else { + break; + } + } + + if (pNode) { + // TODO + } + + return pNode; +} + +SRBTreeNode *tRBTreeGet(SRBTree *pTree, void *pKey) { + SRBTreeNode *pNode = pTree->root; + + while (pNode) { + int32_t c = pTree->cmprFn(pKey, pNode->payload); + + if (c < 0) { + pNode = pNode->left; + } else if (c > 0) { + pNode = pNode->right; + } else { + break; + } + } + + return pNode; +} diff --git a/source/util/src/version.c.in b/source/util/src/version.c.in index be1a4a404875739cdef349a901e52e195c2a9cde..cb307b57fce37ba4243aea83995e66612f3c4371 100644 --- a/source/util/src/version.c.in +++ b/source/util/src/version.c.in @@ -1,4 +1,4 @@ -char version[12] = "${TD_VER_NUMBER}"; +char version[64] = "${TD_VER_NUMBER}"; char compatible_version[12] = "${TD_VER_COMPATIBLE}"; char gitinfo[48] = "${TD_VER_GIT}"; char buildinfo[64] = "Built at ${TD_VER_DATE}"; diff --git a/source/util/test/hashTest.cpp b/source/util/test/hashTest.cpp index 99f5a761c5d0d3a489176749883da981c847011d..97e67ea36e7120b5e09f1097b5fb979b6fc12224 100644 --- a/source/util/test/hashTest.cpp +++ b/source/util/test/hashTest.cpp @@ -197,6 +197,201 @@ void acquireRleaseTest() { taosMemoryFreeClear(data.p); } +void perfTest() { + SHashObj* hash1h = (SHashObj*) taosHashInit(100, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + SHashObj* hash1s = (SHashObj*) taosHashInit(1000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + SHashObj* hash10s = (SHashObj*) taosHashInit(10000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + SHashObj* hash100s = (SHashObj*) taosHashInit(100000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + SHashObj* hash1m = (SHashObj*) taosHashInit(1000000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + SHashObj* hash10m = (SHashObj*) taosHashInit(10000000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + SHashObj* hash100m = (SHashObj*) taosHashInit(100000000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + + char *name = (char*)taosMemoryCalloc(50000000, 9); + for (int64_t i = 0; i < 50000000; ++i) { + sprintf(name + i * 9, "t%08d", i); + } + + for (int64_t i = 0; i < 50; ++i) { + taosHashPut(hash1h, name + i * 9, 9, &i, sizeof(i)); + } + + for (int64_t i = 0; i < 500; ++i) { + taosHashPut(hash1s, name + i * 9, 9, &i, sizeof(i)); + } + + for (int64_t i = 0; i < 5000; ++i) { + taosHashPut(hash10s, name + i * 9, 9, &i, sizeof(i)); + } + + for (int64_t i = 0; i < 50000; ++i) { + taosHashPut(hash100s, name + i * 9, 9, &i, sizeof(i)); + } + + for (int64_t i = 0; i < 500000; ++i) { + taosHashPut(hash1m, name + i * 9, 9, &i, sizeof(i)); + } + + for (int64_t i = 0; i < 5000000; ++i) { + taosHashPut(hash10m, name + i * 9, 9, &i, sizeof(i)); + } + + for (int64_t i = 0; i < 50000000; ++i) { + taosHashPut(hash100m, name + i * 9, 9, &i, sizeof(i)); + } + + int64_t start1h = taosGetTimestampMs(); + int64_t start1hCt = taosHashGetCompTimes(hash1h); + for (int64_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(hash1h, name + (i % 50) * 9, 9)); + } + int64_t end1h = taosGetTimestampMs(); + int64_t end1hCt = taosHashGetCompTimes(hash1h); + + int64_t start1s = taosGetTimestampMs(); + int64_t start1sCt = taosHashGetCompTimes(hash1s); + for (int64_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(hash1s, name + (i % 500) * 9, 9)); + } + int64_t end1s = taosGetTimestampMs(); + int64_t end1sCt = taosHashGetCompTimes(hash1s); + + int64_t start10s = taosGetTimestampMs(); + int64_t start10sCt = taosHashGetCompTimes(hash10s); + for (int64_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(hash10s, name + (i % 5000) * 9, 9)); + } + int64_t end10s = taosGetTimestampMs(); + int64_t end10sCt = taosHashGetCompTimes(hash10s); + + int64_t start100s = taosGetTimestampMs(); + int64_t start100sCt = taosHashGetCompTimes(hash100s); + for (int64_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(hash100s, name + (i % 50000) * 9, 9)); + } + int64_t end100s = taosGetTimestampMs(); + int64_t end100sCt = taosHashGetCompTimes(hash100s); + + int64_t start1m = taosGetTimestampMs(); + int64_t start1mCt = taosHashGetCompTimes(hash1m); + for (int64_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(hash1m, name + (i % 500000) * 9, 9)); + } + int64_t end1m = taosGetTimestampMs(); + int64_t end1mCt = taosHashGetCompTimes(hash1m); + + int64_t start10m = taosGetTimestampMs(); + int64_t start10mCt = taosHashGetCompTimes(hash10m); + for (int64_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(hash10m, name + (i % 5000000) * 9, 9)); + } + int64_t end10m = taosGetTimestampMs(); + int64_t end10mCt = taosHashGetCompTimes(hash10m); + + int64_t start100m = taosGetTimestampMs(); + int64_t start100mCt = taosHashGetCompTimes(hash100m); + for (int64_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(hash100m, name + (i % 50000000) * 9, 9)); + } + int64_t end100m = taosGetTimestampMs(); + int64_t end100mCt = taosHashGetCompTimes(hash100m); + + + SArray *sArray[1000] = {0}; + for (int64_t i = 0; i < 1000; ++i) { + sArray[i] = taosArrayInit(100000, 9); + } + int64_t cap = 4; + while (cap < 100000000) cap = (cap << 1u); + + _hash_fn_t hashFp = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + int32_t slotR = cap / 1000 + 1; + for (int64_t i = 0; i < 10000000; ++i) { + char* p = name + (i % 50000000) * 9; + uint32_t v = (*hashFp)(p, 9); + taosArrayPush(sArray[(v%cap)/slotR], p); + } + SArray *slArray = taosArrayInit(100000000, 9); + for (int64_t i = 0; i < 1000; ++i) { + int32_t num = taosArrayGetSize(sArray[i]); + SArray* pArray = sArray[i]; + for (int64_t m = 0; m < num; ++m) { + char* p = (char*)taosArrayGet(pArray, m); + ASSERT(taosArrayPush(slArray, p)); + } + } + int64_t start100mS = taosGetTimestampMs(); + int64_t start100mSCt = taosHashGetCompTimes(hash100m); + int32_t num = taosArrayGetSize(slArray); + for (int64_t i = 0; i < num; ++i) { + ASSERT(taosHashGet(hash100m, (char*)TARRAY_GET_ELEM(slArray, i), 9)); + } + int64_t end100mS = taosGetTimestampMs(); + int64_t end100mSCt = taosHashGetCompTimes(hash100m); + for (int64_t i = 0; i < 1000; ++i) { + taosArrayDestroy(sArray[i]); + } + taosArrayDestroy(slArray); + + printf("1h \t %" PRId64 "ms,%" PRId64 "\n", end1h - start1h, end1hCt - start1hCt); + printf("1s \t %" PRId64 "ms,%" PRId64 "\n", end1s - start1s, end1sCt - start1sCt); + printf("10s \t %" PRId64 "ms,%" PRId64 "\n", end10s - start10s, end10sCt - start10sCt); + printf("100s \t %" PRId64 "ms,%" PRId64 "\n", end100s - start100s, end100sCt - start100sCt); + printf("1m \t %" PRId64 "ms,%" PRId64 "\n", end1m - start1m, end1mCt - start1mCt); + printf("10m \t %" PRId64 "ms,%" PRId64 "\n", end10m - start10m, end10mCt - start10mCt); + printf("100m \t %" PRId64 "ms,%" PRId64 "\n", end100m - start100m, end100mCt - start100mCt); + printf("100mS \t %" PRId64 "ms,%" PRId64 "\n", end100mS - start100mS, end100mSCt - start100mSCt); + + taosHashCleanup(hash1h); + taosHashCleanup(hash1s); + taosHashCleanup(hash10s); + taosHashCleanup(hash100s); + taosHashCleanup(hash1m); + taosHashCleanup(hash10m); + taosHashCleanup(hash100m); + + SHashObj *mhash[1000] = {0}; + for (int64_t i = 0; i < 1000; ++i) { + mhash[i] = (SHashObj*) taosHashInit(100000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + } + + for (int64_t i = 0; i < 50000000; ++i) { +#if 0 + taosHashPut(mhash[i%1000], name + i * 9, 9, &i, sizeof(i)); +#else + taosHashPut(mhash[i/50000], name + i * 9, 9, &i, sizeof(i)); +#endif + } + + int64_t startMhashCt = 0; + for (int64_t i = 0; i < 1000; ++i) { + startMhashCt += taosHashGetCompTimes(mhash[i]); + } + + int64_t startMhash = taosGetTimestampMs(); +#if 0 + for (int32_t i = 0; i < 10000000; ++i) { + ASSERT(taosHashGet(mhash[i%1000], name + i * 9, 9)); + } +#else +// for (int64_t i = 0; i < 10000000; ++i) { + for (int64_t i = 0; i < 50000000; i+=5) { + ASSERT(taosHashGet(mhash[i/50000], name + i * 9, 9)); + } +#endif + int64_t endMhash = taosGetTimestampMs(); + int64_t endMhashCt = 0; + for (int64_t i = 0; i < 1000; ++i) { + printf(" %" PRId64 , taosHashGetCompTimes(mhash[i])); + endMhashCt += taosHashGetCompTimes(mhash[i]); + } + printf("\n100m \t %" PRId64 "ms,%" PRId64 "\n", endMhash - startMhash, endMhashCt - startMhashCt); + + for (int64_t i = 0; i < 1000; ++i) { + taosHashCleanup(mhash[i]); + } +} + + } int main(int argc, char** argv) { @@ -210,4 +405,5 @@ TEST(testCase, hashTest) { noLockPerformanceTest(); multithreadsTest(); acquireRleaseTest(); + //perfTest(); } diff --git a/source/util/test/pageBufferTest.cpp b/source/util/test/pageBufferTest.cpp index eaf198a483aa5e3e90595d2417516aa53f754331..1a057c5875ee95de2fc3c457ca09314366fff48c 100644 --- a/source/util/test/pageBufferTest.cpp +++ b/source/util/test/pageBufferTest.cpp @@ -18,7 +18,7 @@ void simpleTest() { int32_t pageId = 0; int32_t groupId = 0; - SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId)); ASSERT_TRUE(pBufPage != NULL); ASSERT_EQ(getTotalBufSize(pBuf), 1024); @@ -29,26 +29,26 @@ void simpleTest() { releaseBufPage(pBuf, pBufPage); - SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t == pBufPage1); - SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t1 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t1 == pBufPage2); - SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t2 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t2 == pBufPage3); - SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t3 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t3 == pBufPage4); releaseBufPage(pBuf, pBufPage2); - SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t4 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t4 == pBufPage5); @@ -64,7 +64,7 @@ void writeDownTest() { int32_t groupId = 0; int32_t nx = 12345; - SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId)); ASSERT_TRUE(pBufPage != NULL); *(int32_t*)(pBufPage->data) = nx; @@ -73,22 +73,22 @@ void writeDownTest() { setBufPageDirty(pBufPage, true); releaseBufPage(pBuf, pBufPage); - SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t1 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t1 == pBufPage1); ASSERT_TRUE(pageId == 1); - SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t2 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t2 == pBufPage2); ASSERT_TRUE(pageId == 2); - SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t3 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t3 == pBufPage3); ASSERT_TRUE(pageId == 3); - SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t4 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t4 == pBufPage4); ASSERT_TRUE(pageId == 4); @@ -113,32 +113,32 @@ void recyclePageTest() { int32_t groupId = 0; int32_t nx = 12345; - SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId)); ASSERT_TRUE(pBufPage != NULL); releaseBufPage(pBuf, pBufPage); - SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t1 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t1 == pBufPage1); ASSERT_TRUE(pageId == 1); - SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t2 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t2 == pBufPage2); ASSERT_TRUE(pageId == 2); - SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t3 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t3 == pBufPage3); ASSERT_TRUE(pageId == 3); - SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t4 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t4 == pBufPage4); ASSERT_TRUE(pageId == 4); releaseBufPage(pBuf, t4); - SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, groupId, &pageId)); + SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, &pageId)); SFilePage* t5 = static_cast(getBufPage(pBuf, pageId)); ASSERT_TRUE(t5 == pBufPage5); ASSERT_TRUE(pageId == 5); diff --git a/tests/docs-examples-test/jdbc.sh b/tests/docs-examples-test/jdbc.sh new file mode 100644 index 0000000000000000000000000000000000000000..d71085a40306956ea8d25e9b575c97ae9945df76 --- /dev/null +++ b/tests/docs-examples-test/jdbc.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +pgrep taosd || taosd >> /dev/null 2>&1 & +pgrep taosadapter || taosadapter >> /dev/null 2>&1 & +cd ../../docs/examples/java + +mvn clean test > jdbc-out.log 2>&1 +tail -n 20 jdbc-out.log + +cases=`grep 'Tests run' jdbc-out.log | awk 'END{print $3}'` +totalJDBCCases=`echo ${cases/%,}` +failed=`grep 'Tests run' jdbc-out.log | awk 'END{print $5}'` +JDBCFailed=`echo ${failed/%,}` +error=`grep 'Tests run' jdbc-out.log | awk 'END{print $7}'` +JDBCError=`echo ${error/%,}` + +totalJDBCFailed=`expr $JDBCFailed + $JDBCError` +totalJDBCSuccess=`expr $totalJDBCCases - $totalJDBCFailed` + +if [ "$totalJDBCSuccess" -gt "0" ]; then + echo -e "\n${GREEN} ### Total $totalJDBCSuccess JDBC case(s) succeed! ### ${NC}" +fi + +if [ "$totalJDBCFailed" -ne "0" ]; then + echo -e "\n${RED} ### Total $totalJDBCFailed JDBC case(s) failed! ### ${NC}" + exit 8 +fi \ No newline at end of file diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 203541f14a49e27d8298cb6f21077bae8cfbc0b9..600c64b8e6ac0a521d3c736c3256c79dfcefbf8e 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -1120,7 +1120,7 @@ class Database: @classmethod def setupLastTick(cls): # start time will be auto generated , start at 10 years ago local time - local_time = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-16] + local_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-16] local_epoch_time = [int(i) for i in local_time.split("-")] #local_epoch_time will be such as : [2022, 7, 18] diff --git a/tests/pytest/crash_gen/shared/misc.py b/tests/pytest/crash_gen/shared/misc.py index fd73f97fcbd0f241ac96a241ff3f912a67ce58d4..6a8a59a02700a4e15320532e0f20b2d4e0b0c336 100644 --- a/tests/pytest/crash_gen/shared/misc.py +++ b/tests/pytest/crash_gen/shared/misc.py @@ -46,7 +46,7 @@ class Logging: @classmethod def _get_datetime(cls): - return datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-1] + return datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-1] @classmethod def getLogger(cls): diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index 33ef92bf735a5211044ebd37c3c8300abd8843a8..9ffebcbdad5f0fa07e26f1bb4d249643ab7bbe42 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -199,22 +199,22 @@ class TDCom: res = requests.post(url, sql.encode("utf-8"), headers = self.preDefine()[0]) return res - def cleanTb(self, type="taosc"): + def cleanTb(self, type="taosc", dbname="db"): ''' type is taosc or restful ''' - query_sql = "show stables" + query_sql = f"show {dbname}.stables" res_row_list = tdSql.query(query_sql, True) stb_list = map(lambda x: x[0], res_row_list) for stb in stb_list: if type == "taosc": - tdSql.execute(f'drop table if exists `{stb}`') + tdSql.execute(f'drop table if exists {dbname}.`{stb}`') if not stb[0].isdigit(): - tdSql.execute(f'drop table if exists {stb}') + tdSql.execute(f'drop table if exists {dbname}.{stb}') elif type == "restful": - self.restApiPost(f"drop table if exists `{stb}`") + self.restApiPost(f"drop table if exists {dbname}.`{stb}`") if not stb[0].isdigit(): - self.restApiPost(f"drop table if exists {stb}") + self.restApiPost(f"drop table if exists {dbname}.{stb}") def dateToTs(self, datetime_input): return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f"))) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index e530695d1e53c4628fb28175b308b67d149c16a3..89b7fe00ebb0cf04b4570643966d553a4bccea9b 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -36,9 +36,9 @@ class TDSimClient: "rpcDebugFlag": "143", "tmrDebugFlag": "131", "cDebugFlag": "143", - "udebugFlag": "143", - "jnidebugFlag": "143", - "qdebugFlag": "143", + "uDebugFlag": "143", + "jniDebugFlag": "143", + "qDebugFlag": "143", "supportVnodes": "1024", "telemetryReporting": "0", } @@ -134,7 +134,6 @@ class TDDnode: "uDebugFlag": "131", "sDebugFlag": "143", "wDebugFlag": "143", - "qdebugFlag": "143", "numOfLogLines": "100000000", "statusInterval": "1", "supportVnodes": "1024", @@ -484,7 +483,7 @@ class TDDnode: psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled processID = subprocess.check_output( psCmd, shell=True).decode("utf-8") - + onlyKillOnceWindows = 0 while(processID): if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'): diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 753c41e094701271ca3b49a53eabde1461bd1e08..b320cf5995fd0063352f0da7a2dc04933022a7d2 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -102,7 +102,7 @@ class TDSql: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, sql, repr(e)) tdLog.notice("%s(%d) failed: sql:%s, %s" % args) - raise Exception(repr(e)) + raise Exception(repr(e)) i+=1 time.sleep(1) pass @@ -225,25 +225,21 @@ class TDSql: # suppose user want to check nanosecond timestamp if a longer data passed if (len(data) >= 28): if pd.to_datetime(self.queryResult[row][col]) == pd.to_datetime(data): - tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) + tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") else: if self.queryResult[row][col] == _parse_datetime(data): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) + tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") return if str(self.queryResult[row][col]) == str(data): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) + tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") return + elif isinstance(data, float): if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001: - tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" % - (self.sql, row, col, self.queryResult[row][col], data)) + tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001: - tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" % - (self.sql, row, col, self.queryResult[row][col], data)) + tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") else: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) @@ -254,21 +250,7 @@ class TDSql: args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) - if data is None: - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) - elif isinstance(data, str): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) - elif isinstance(data, datetime.date): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) - elif isinstance(data, float): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) - else: - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" % - (self.sql, row, col, self.queryResult[row][col], data)) + tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") def getData(self, row, col): self.checkRowCol(row, col) @@ -307,7 +289,7 @@ class TDSql: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, sql, repr(e)) tdLog.notice("%s(%d) failed: sql:%s, %s" % args) - raise Exception(repr(e)) + raise Exception(repr(e)) i+=1 time.sleep(1) pass @@ -329,7 +311,7 @@ class TDSql: tdLog.exit("%s(%d) failed: sql:%s, col_name_list:%s != expect_col_name_list:%s" % args) def __check_equal(self, elm, expect_elm): - if not type(elm) in(list, tuple) and elm == expect_elm: + if elm == expect_elm: return True if type(elm) in(list, tuple) and type(expect_elm) in(list, tuple): if len(elm) != len(expect_elm): diff --git a/tests/pytest/util/taosadapter.py b/tests/pytest/util/taosadapter.py index 614eb95d6b4871df8b6f23b9140fae008883d7a5..0307675dfbe39d7f573ee65cb8b97e0d24f9c34b 100644 --- a/tests/pytest/util/taosadapter.py +++ b/tests/pytest/util/taosadapter.py @@ -238,19 +238,23 @@ class TAdapter: if self.running != 0: psCmd = f"ps -ef|grep -w {toBeKilled}| grep -v grep | awk '{{print $2}}'" + # psCmd = f"pgrep {toBeKilled}" processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") + psCmd, shell=True) while(processID): - killCmd = f"kill {signal} {processID} > /dev/null 2>&1" + killCmd = f"pkill {signal} {processID} > /dev/null 2>&1" os.system(killCmd) time.sleep(1) processID = subprocess.check_output( psCmd, shell=True).decode("utf-8") if not platform.system().lower() == 'windows': - for port in range(6030, 6041): - fuserCmd = f"fuser -k -n tcp {port} > /dev/null" - os.system(fuserCmd) + port = 6041 + fuserCmd = f"fuser -k -n tcp {port} > /dev/null" + os.system(fuserCmd) + # for port in range(6030, 6041): + # fuserCmd = f"fuser -k -n tcp {port} > /dev/null" + # os.system(fuserCmd) self.running = 0 tdLog.debug(f"taosadapter is stopped by kill {signal}") diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c index ada2039460b431363555025ec7984f6b2f1b354a..f39d5e6528275900350ffaefbee18d43ce9a9e81 100644 --- a/tests/script/api/batchprepare.c +++ b/tests/script/api/batchprepare.c @@ -2598,7 +2598,6 @@ void runAll(TAOS *taos) { printf("%s Begin\n", gCaseCtrl.caseCatalog); runCaseList(taos); -#if 0 strcpy(gCaseCtrl.caseCatalog, "Micro DB precision Test"); printf("%s Begin\n", gCaseCtrl.caseCatalog); gCaseCtrl.precision = TIME_PRECISION_MICRO; @@ -2654,7 +2653,6 @@ void runAll(TAOS *taos) { gCaseCtrl.bindColNum = 6; runCaseList(taos); gCaseCtrl.bindColNum = 0; -#endif /* strcpy(gCaseCtrl.caseCatalog, "Bind Col Type Test"); diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index fda5e5cb6ef7bea4be2b9e121288cfe3ddd3e339..46bae734ea72901ef704969045186a10c52a9a72 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -237,8 +237,8 @@ ./test.sh -f tsim/stream/distributeInterval0.sim ./test.sh -f tsim/stream/distributeIntervalRetrive0.sim ./test.sh -f tsim/stream/distributeSession0.sim -#./test.sh -f tsim/stream/session0.sim -#./test.sh -f tsim/stream/session1.sim +./test.sh -f tsim/stream/session0.sim +./test.sh -f tsim/stream/session1.sim ./test.sh -f tsim/stream/state0.sim ./test.sh -f tsim/stream/triggerInterval0.sim ./test.sh -f tsim/stream/triggerSession0.sim @@ -344,6 +344,7 @@ # --- scalar ---- ./test.sh -f tsim/scalar/in.sim ./test.sh -f tsim/scalar/scalar.sim +./test.sh -f tsim/scalar/filter.sim # ---- alter ---- ./test.sh -f tsim/alter/cached_schema_after_alter.sim diff --git a/tests/script/tmp/monitor.sim b/tests/script/tmp/monitor.sim index 8eb787e95035a106e0c1141a9f8d0de6584c26c3..b410e1b6ad99e8bd83dcf7dd3cf0f3c4961d0ad4 100644 --- a/tests/script/tmp/monitor.sim +++ b/tests/script/tmp/monitor.sim @@ -4,6 +4,7 @@ system sh/cfg.sh -n dnode1 -c monitorfqdn -v localhost system sh/cfg.sh -n dnode1 -c monitorport -v 80 system sh/cfg.sh -n dnode1 -c monitorInterval -v 1 system sh/cfg.sh -n dnode1 -c monitorComp -v 1 +system sh/cfg.sh -n dnode1 -c uptimeInterval -v 3 #system sh/cfg.sh -n dnode1 -c supportVnodes -v 128 #system sh/cfg.sh -n dnode1 -c telemetryReporting -v 1 @@ -14,13 +15,13 @@ system sh/cfg.sh -n dnode1 -c monitorComp -v 1 system sh/exec.sh -n dnode1 -s start sql connect -print =============== select * from information_schema.ins_dnodes +print =============== create database sql create database db vgroups 2; sql use db; sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd"; print =============== create drop qnode 1 sql create qnode on dnode 1 -sql create snode on dnode 1 -sql create bnode on dnode 1 +#sql create snode on dnode 1 +#sql create bnode on dnode 1 diff --git a/tests/script/tsim/alter/cached_schema_after_alter.sim b/tests/script/tsim/alter/cached_schema_after_alter.sim index bd2b1d272ce83525fc645451ea5a48bbaa2611be..30b879b6129c773373bea87b523fc876f838a194 100644 --- a/tests/script/tsim/alter/cached_schema_after_alter.sim +++ b/tests/script/tsim/alter/cached_schema_after_alter.sim @@ -14,7 +14,7 @@ print ========== cached_schema_after_alter.sim sql drop database $db -x step1 step1: -sql create database $db +sql create database $db print ====== create tables sql use $db @@ -32,10 +32,10 @@ if $rows != 1 then endi if $data01 != 1 then return -1 -endi +endi if $data02 != 1 then return -1 -endi +endi sql select * from $tb2 if $rows != 1 then @@ -43,10 +43,10 @@ if $rows != 1 then endi if $data01 != 1 then return -1 -endi +endi if $data02 != 1 then return -1 -endi +endi print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT @@ -61,10 +61,10 @@ if $rows != 1 then endi if $data01 != 1 then return -1 -endi +endi if $data02 != 1 then return -1 -endi +endi sql select * from $tb2 print select * from $tb2 ==> $data00 $data01 $data02 @@ -73,10 +73,10 @@ if $rows != 1 then endi if $data01 != 1 then return -1 -endi +endi if $data02 != 1 then return -1 -endi +endi $ts = $ts0 + $delta sql insert into $tb2 values ( $ts , 2, 2) @@ -86,16 +86,16 @@ if $rows != 2 then endi if $data01 != 1 then return -1 -endi +endi if $data02 != 1 then return -1 -endi +endi if $data11 != 2 then return -1 -endi +endi if $data12 != 2 then return -1 -endi +endi sql select * from $tb2 order by ts asc if $rows != 2 then @@ -103,15 +103,15 @@ if $rows != 2 then endi if $data01 != 1 then return -1 -endi +endi if $data02 != 1 then return -1 -endi +endi if $data11 != 2 then return -1 -endi +endi if $data12 != 2 then return -1 -endi +endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/alter/dnode.sim b/tests/script/tsim/alter/dnode.sim index d773c1f8a936e24d3cf19b9300f580eb2970f318..be3c385d45a58a538d786a34fe59f74e5dc38678 100644 --- a/tests/script/tsim/alter/dnode.sim +++ b/tests/script/tsim/alter/dnode.sim @@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/exec.sh -n dnode1 -s start sql connect -print ======== step1 +print ======== step1 sql alter dnode 1 'resetlog' sql alter dnode 1 'monitor' '1' sql alter dnode 1 'monitor' '0' @@ -65,4 +65,4 @@ sql alter dnode 1 balance "vnode:2-dnode:1" -x step4 step4: print ======= over -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/alter/table.sim b/tests/script/tsim/alter/table.sim index 48ab7ddab050a41e7176df86ba882589715bad44..dccfc7f5d6f9bf2afb71cd0c44dc85e722758538 100644 --- a/tests/script/tsim/alter/table.sim +++ b/tests/script/tsim/alter/table.sim @@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/exec.sh -n dnode1 -s start sql connect -print ======== step1 +print ======== step1 sql create database d1 sql use d1 sql create table tb (ts timestamp, a int) diff --git a/tests/script/tsim/bnode/basic1.sim b/tests/script/tsim/bnode/basic1.sim index 003d0ceb3d118cdc727dbb0b436cee1a4ad3ddb1..0a200016368efffbdd360c891cc6f15d3a284b47 100644 --- a/tests/script/tsim/bnode/basic1.sim +++ b/tests/script/tsim/bnode/basic1.sim @@ -7,24 +7,24 @@ sql connect print =============== select * from information_schema.ins_dnodes sql select * from information_schema.ins_dnodes; -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 1 then +if $data00 != 1 then return -1 endi sql select * from information_schema.ins_mnodes; -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 1 then +if $data00 != 1 then return -1 endi -if $data02 != leader then +if $data02 != leader then return -1 endi @@ -33,62 +33,62 @@ sql create dnode $hostname port 7200 sleep 2000 sql select * from information_schema.ins_dnodes; -if $rows != 2 then +if $rows != 2 then return -1 endi -if $data00 != 1 then +if $data00 != 1 then return -1 endi -if $data10 != 2 then +if $data10 != 2 then return -1 endi print $data02 -if $data02 != 0 then +if $data02 != 0 then return -1 endi -if $data12 != 0 then +if $data12 != 0 then return -1 endi -if $data04 != ready then +if $data04 != ready then return -1 endi -if $data14 != ready then +if $data14 != ready then return -1 endi sql select * from information_schema.ins_mnodes; -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 1 then +if $data00 != 1 then return -1 endi -if $data02 != leader then +if $data02 != leader then return -1 endi #print =============== create drop bnode 1 #sql create bnode on dnode 1 #sql show bnodes -#if $rows != 1 then +#if $rows != 1 then # return -1 #endi -#if $data00 != 1 then +#if $data00 != 1 then # return -1 #endi #sql_error create bnode on dnode 1 # #sql drop bnode on dnode 1 #sql show bnodes -#if $rows != 0 then +#if $rows != 0 then # return -1 #endi #sql_error drop bnode on dnode 1 @@ -96,17 +96,17 @@ endi #print =============== create drop bnode 2 #sql create bnode on dnode 2 #sql show bnodes -#if $rows != 1 then +#if $rows != 1 then # return -1 #endi -#if $data00 != 2 then +#if $data00 != 2 then # return -1 #endi #sql_error create bnode on dnode 2 # #sql drop bnode on dnode 2 #sql show bnodes -#if $rows != 0 then +#if $rows != 0 then # return -1 #endi #sql_error drop bnode on dnode 2 @@ -115,7 +115,7 @@ endi #sql create bnode on dnode 1 #sql create bnode on dnode 2 #sql show bnodes -#if $rows != 2 then +#if $rows != 2 then # return -1 #endi @@ -127,7 +127,7 @@ endi # #sleep 2000 #sql show bnodes -#if $rows != 2 then +#if $rows != 2 then # return -1 #endi diff --git a/tests/script/tsim/column/commit.sim b/tests/script/tsim/column/commit.sim index 43aebb490256b7ba28c2ed99c47a800d4ee2e102..899d51af87198f6bb83ed4bc9fa2fc2f6be553f8 100644 --- a/tests/script/tsim/column/commit.sim +++ b/tests/script/tsim/column/commit.sim @@ -10,12 +10,12 @@ sql create table d3.mt (ts timestamp, c000 int, c001 int, c002 int, c003 int, c0 sql create table d3.t1 using d3.mt tags(1, 2, '3', 4, 5, 6) sql show tables -if $rows != 1 then +if $rows != 1 then return -1 endi sql show stables -if $rows != 1 then +if $rows != 1 then return -1 endi @@ -33,50 +33,50 @@ sql insert into d3.t1 values (now+1d,9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , print =============== step3 sql select * from d3.mt -if $rows != 10 then +if $rows != 10 then return -1 endi sql select * from d3.mt where c001 = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi sql select * from d3.mt where c002 = 2 and c003 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi sql select count(c001), count(c248), avg(c001), avg(c248), sum(c001), max(c001), min(c248), avg(c235), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*) from d3.mt print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 -if $data00 != 10 then +if $data00 != 10 then return -1 endi -if $data01 != 10 then +if $data01 != 10 then return -1 endi -if $data02 != 4.500000000 then +if $data02 != 4.500000000 then return -1 endi -if $data03 != 4.500000000 then +if $data03 != 4.500000000 then return -1 endi -if $data04 != 45 then +if $data04 != 45 then return -1 endi -if $data05 != 9 then +if $data05 != 9 then return -1 endi -if $data06 != 0 then +if $data06 != 0 then return -1 endi -if $data07 != 4.500000000 then +if $data07 != 4.500000000 then return -1 endi -if $data08 != 10 then +if $data08 != 10 then return -1 endi -if $data09 != 10 then +if $data09 != 10 then return -1 endi @@ -86,17 +86,17 @@ system sh/exec.sh -n dnode1 -s start print =============== step5 sql select * from d3.mt -if $rows != 10 then +if $rows != 10 then return -1 endi sql select * from d3.mt where c001 = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi sql select * from d3.mt where c002 = 2 and c003 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi @@ -107,35 +107,35 @@ endi sql select count(c001), count(c248), avg(c001), avg(c248), sum(c001), max(c001), min(c248), avg(c128), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*) from d3.mt print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 -if $data00 != 10 then +if $data00 != 10 then return -1 endi -if $data01 != 10 then +if $data01 != 10 then return -1 endi -if $data02 != 4.500000000 then +if $data02 != 4.500000000 then return -1 endi -if $data03 != 4.500000000 then +if $data03 != 4.500000000 then return -1 endi -if $data04 != 45 then +if $data04 != 45 then return -1 endi -if $data05 != 9 then +if $data05 != 9 then return -1 endi -if $data06 != 0 then +if $data06 != 0 then return -1 endi -if $data07 != 4.500000000 then +if $data07 != 4.500000000 then return -1 endi -if $data08 != 10 then +if $data08 != 10 then return -1 endi -if $data09 != 10 then +if $data09 != 10 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/column/metrics.sim b/tests/script/tsim/column/metrics.sim index a492f5a2f9027aba1ce332367e92a18da657ce46..6a144a15d336cf2b46497441445486d87b809fd0 100644 --- a/tests/script/tsim/column/metrics.sim +++ b/tests/script/tsim/column/metrics.sim @@ -11,12 +11,12 @@ sql create table d2.t1 using d2.mt tags(1, 2, '3', 4, 5, 6) sql create table d2.t2 using d2.mt tags(6, 7, '8', 9, 10, 11) sql show tables -if $rows != 2 then +if $rows != 2 then return -1 endi sql show stables -if $rows != 1 then +if $rows != 1 then return -1 endi @@ -53,98 +53,98 @@ sql insert into d2.t2 values (now+9m,9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , print =============== step3 sql select * from d2.mt -if $rows != 20 then +if $rows != 20 then return -1 endi sql select * from d2.mt where ts < now + 4m -if $rows != 10 then +if $rows != 10 then return -1 endi sql select * from d2.mt where c001 = 1 -if $rows != 2 then +if $rows != 2 then return -1 endi sql select * from d2.mt where c002 = 2 and c003 = 2 -if $rows != 2 then +if $rows != 2 then return -1 endi sql select * from d2.mt where c002 = 2 and c003 = 2 and ts < now + 4m -if $rows != 2 then +if $rows != 2 then return -1 endi sql select count(*) from d2.mt -if $data00 != 20 then +if $data00 != 20 then return -1 endi sql select count(c001), count(c248), avg(c001), avg(c248), sum(c001), max(c001), min(c248), avg(c235), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*) from d2.mt print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 -if $data00 != 20 then +if $data00 != 20 then return -1 endi -if $data01 != 20 then +if $data01 != 20 then return -1 endi -if $data02 != 4.500000000 then +if $data02 != 4.500000000 then return -1 endi -if $data03 != 4.500000000 then +if $data03 != 4.500000000 then return -1 endi -if $data04 != 90 then +if $data04 != 90 then return -1 endi -if $data05 != 9 then +if $data05 != 9 then return -1 endi -if $data06 != 0 then +if $data06 != 0 then return -1 endi -if $data07 != 4.500000000 then +if $data07 != 4.500000000 then return -1 endi -if $data08 != 20 then +if $data08 != 20 then return -1 endi -if $data09 != 20 then +if $data09 != 20 then return -1 endi sql select count(c001), count(c248), avg(c001), avg(c248), sum(c001), max(c001), min(c248), avg(c238), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*) from d2.mt where a = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 -if $data00 != 10 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 +if $data00 != 10 then return -1 endi -if $data01 != 10 then +if $data01 != 10 then return -1 endi -if $data02 != 4.500000000 then +if $data02 != 4.500000000 then return -1 endi -if $data03 != 4.500000000 then +if $data03 != 4.500000000 then return -1 endi -if $data04 != 45 then +if $data04 != 45 then return -1 endi -if $data05 != 9 then +if $data05 != 9 then return -1 endi -if $data06 != 0 then +if $data06 != 0 then return -1 endi -if $data07 != 4.500000000 then +if $data07 != 4.500000000 then return -1 endi -if $data08 != 10 then +if $data08 != 10 then return -1 endi -if $data09 != 10 then +if $data09 != 10 then return -1 endi @@ -154,56 +154,56 @@ system sh/exec.sh -n dnode1 -s start print =============== step5 sql select * from d2.mt -if $rows != 20 then +if $rows != 20 then return -1 endi sql select * from d2.mt where c001 = 1 -if $rows != 2 then +if $rows != 2 then return -1 endi sql select * from d2.mt where c002 = 2 and c003 = 2 -if $rows != 2 then +if $rows != 2 then return -1 endi sql select count(*) from d2.mt -if $data00 != 20 then +if $data00 != 20 then return -1 endi sql select count(c001), count(c248), avg(c001), avg(c248), sum(c001), max(c001), min(c248), avg(c128), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*) from d2.mt print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 -if $data00 != 20 then +if $data00 != 20 then return -1 endi -if $data01 != 20 then +if $data01 != 20 then return -1 endi -if $data02 != 4.500000000 then +if $data02 != 4.500000000 then return -1 endi -if $data03 != 4.500000000 then +if $data03 != 4.500000000 then return -1 endi -if $data04 != 90 then +if $data04 != 90 then return -1 endi -if $data05 != 9 then +if $data05 != 9 then return -1 endi -if $data06 != 0 then +if $data06 != 0 then return -1 endi -if $data07 != 4.500000000 then +if $data07 != 4.500000000 then return -1 endi -if $data08 != 20 then +if $data08 != 20 then return -1 endi -if $data09 != 20 then +if $data09 != 20 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/column/table.sim b/tests/script/tsim/column/table.sim index 07948ebce36f88ae5eef2de51fd11e352d9af86a..4f1d32c373e3712275deec14a54e7efa0e77de61 100644 --- a/tests/script/tsim/column/table.sim +++ b/tests/script/tsim/column/table.sim @@ -9,7 +9,7 @@ sql use d1 sql create table d1.t1 (ts timestamp, c000 int, c001 int, c002 int, c003 int, c004 int, c005 int, c006 int, c007 int, c008 int, c009 int, c010 int, c011 int, c012 int, c013 int, c014 int, c015 int, c016 int, c017 int, c018 int, c019 int, c020 int, c021 int, c022 int, c023 int, c024 int, c025 int, c026 int, c027 int, c028 int, c029 int, c030 int, c031 int, c032 int, c033 int, c034 int, c035 int, c036 int, c037 int, c038 int, c039 int, c040 int, c041 int, c042 int, c043 int, c044 int, c045 int, c046 int, c047 int, c048 int, c049 int, c050 int, c051 int, c052 int, c053 int, c054 int, c055 int, c056 int, c057 int, c058 int, c059 int, c060 int, c061 int, c062 int, c063 int, c064 int, c065 int, c066 int, c067 int, c068 int, c069 int, c070 int, c071 int, c072 int, c073 int, c074 int, c075 int, c076 int, c077 int, c078 int, c079 int, c080 int, c081 int, c082 int, c083 int, c084 int, c085 int, c086 int, c087 int, c088 int, c089 int, c090 int, c091 int, c092 int, c093 int, c094 int, c095 int, c096 int, c097 int, c098 int, c099 int, c100 int, c101 int, c102 int, c103 int, c104 int, c105 int, c106 int, c107 int, c108 int, c109 int, c110 int, c111 int, c112 int, c113 int, c114 int, c115 int, c116 int, c117 int, c118 int, c119 int, c120 int, c121 int, c122 int, c123 int, c124 int, c125 int, c126 int, c127 int, c128 int, c129 int, c130 int, c131 int, c132 int, c133 int, c134 int, c135 int, c136 int, c137 int, c138 int, c139 int, c140 int, c141 int, c142 int, c143 int, c144 int, c145 int, c146 int, c147 int, c148 int, c149 int, c150 int, c151 int, c152 int, c153 int, c154 int, c155 int, c156 int, c157 int, c158 int, c159 int, c160 int, c161 int, c162 int, c163 int, c164 int, c165 int, c166 int, c167 int, c168 int, c169 int, c170 int, c171 int, c172 int, c173 int, c174 int, c175 int, c176 int, c177 int, c178 int, c179 int, c180 int, c181 int, c182 int, c183 int, c184 int, c185 int, c186 int, c187 int, c188 int, c189 int, c190 int, c191 int, c192 int, c193 int, c194 int, c195 int, c196 int, c197 int, c198 int, c199 int, c200 int, c201 int, c202 int, c203 int, c204 int, c205 int, c206 int, c207 int, c208 int, c209 int, c210 int, c211 int, c212 int, c213 int, c214 int, c215 int, c216 int, c217 int, c218 int, c219 int, c220 int, c221 int, c222 int, c223 int, c224 int, c225 int, c226 int, c227 int, c228 int, c229 int, c230 int, c231 int, c232 int, c233 int, c234 int, c235 int, c236 int, c237 int, c238 int, c239 int, c240 int, c241 int, c242 int, c243 int, c244 int, c245 int, c246 int, c247 int, c248 int, c249 int, c250 int) sql show tables -if $rows != 1 then +if $rows != 1 then return -1 endi @@ -32,91 +32,91 @@ sql insert into d1.t1 values (now+9m,9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , print ======= step3 sql select * from d1.t1 print select * from d1.t1 => rows $rows -if $rows != 10 then +if $rows != 10 then return -1 endi sql select * from d1.t1 where ts < now + 4m print select * from d1.t1 where ts < now + 4m => rows $rows -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from d1.t1 where c001 = 1 print select * from d1.t1 where c001 = 1 => rows $rows -if $rows != 1 then +if $rows != 1 then return -1 endi sql select * from d1.t1 where c002 = 2 and c003 = 2 print select * from d1.t1 where c002 = 2 and c003 = 2 => rows $rows -if $rows != 1 then +if $rows != 1 then return -1 endi sql select * from d1.t1 where c002 = 2 and c003 = 2 and ts < now + 4m print select * from d1.t1 where c002 = 2 and c003 = 2 and ts < now + 4m => rows $rows -if $rows != 1 then +if $rows != 1 then return -1 endi sql select count(*) from d1.t1 print select count(*) from d1.t1 => $data00 -if $data00 != 10 then +if $data00 != 10 then return -1 endi sql select count(c001), count(c250), avg(c001), avg(c250), sum(c001), max(c001), min(c250), stddev(c250) from d1.t1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 -if $data00 != 10 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 +if $data00 != 10 then return -1 endi -if $data01 != 10 then +if $data01 != 10 then return -1 endi -if $data02 != 4.500000000 then +if $data02 != 4.500000000 then return -1 endi -if $data03 != 4.500000000 then +if $data03 != 4.500000000 then return -1 endi -if $data04 != 45 then +if $data04 != 45 then return -1 endi -if $data05 != 9 then +if $data05 != 9 then return -1 endi -if $data06 != 0 then +if $data06 != 0 then return -1 endi -if $data07 != 2.872281323 then +if $data07 != 2.872281323 then return -1 endi -sql select count(c001), count(c250), avg(c001), avg(c250), sum(c001), max(c001), min(c250), stddev(c250), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*) from d1.t1 +sql select count(c001), count(c250), avg(c001), avg(c250), sum(c001), max(c001), min(c250), stddev(c250), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*) from d1.t1 -if $data00 != 10 then +if $data00 != 10 then return -1 endi -if $data01 != 10 then +if $data01 != 10 then return -1 endi -if $data02 != 4.500000000 then +if $data02 != 4.500000000 then return -1 endi -if $data03 != 4.500000000 then +if $data03 != 4.500000000 then return -1 endi -if $data04 != 45 then +if $data04 != 45 then return -1 endi -if $data05 != 9 then +if $data05 != 9 then return -1 endi -if $data06 != 0 then +if $data06 != 0 then return -1 endi -if $data07 != 2.872281323 then +if $data07 != 2.872281323 then return -1 endi @@ -128,79 +128,79 @@ print ============== step5 sql select * from d1.t1 print select * from d1.t1 => rows $rows -if $rows != 10 then +if $rows != 10 then return -1 endi sql select * from d1.t1 where c001 = 1 print select * from d1.t1 where c001 = 1 => rows $rows -if $rows != 1 then +if $rows != 1 then return -1 endi sql select * from d1.t1 where c002 = 2 and c003 = 2 print select * from d1.t1 where c002 = 2 and c003 = 2 => rows $rows -if $rows != 1 then +if $rows != 1 then return -1 endi sql select count(*) from d1.t1 print select count(*) from d1.t1 => $data00 -if $data00 != 10 then +if $data00 != 10 then return -1 endi sql select count(c001), count(c250), avg(c001), avg(c250), sum(c001), max(c001), min(c250), stddev(c250) from d1.t1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 -if $data00 != 10 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 +if $data00 != 10 then return -1 endi -if $data01 != 10 then +if $data01 != 10 then return -1 endi -if $data02 != 4.500000000 then +if $data02 != 4.500000000 then return -1 endi -if $data03 != 4.500000000 then +if $data03 != 4.500000000 then return -1 endi -if $data04 != 45 then +if $data04 != 45 then return -1 endi -if $data05 != 9 then +if $data05 != 9 then return -1 endi -if $data06 != 0 then +if $data06 != 0 then return -1 endi -if $data07 != 2.872281323 then +if $data07 != 2.872281323 then return -1 endi sql select count(c001), count(c250), avg(c001), avg(c250), sum(c001), max(c001), min(c250), stddev(c250), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*), count(*) from d1.t1 -if $data00 != 10 then +if $data00 != 10 then return -1 endi -if $data01 != 10 then +if $data01 != 10 then return -1 endi -if $data02 != 4.500000000 then +if $data02 != 4.500000000 then return -1 endi -if $data03 != 4.500000000 then +if $data03 != 4.500000000 then return -1 endi -if $data04 != 45 then +if $data04 != 45 then return -1 endi -if $data05 != 9 then +if $data05 != 9 then return -1 endi -if $data06 != 0 then +if $data06 != 0 then return -1 endi -if $data07 != 2.872281323 then +if $data07 != 2.872281323 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compress/commitlog.sim b/tests/script/tsim/compress/commitlog.sim index bc9c231a9ec311fc500a4fb59bb46173ba19cf0e..38899b95ba9fbd7d156991c98279db878c8881d0 100644 --- a/tests/script/tsim/compress/commitlog.sim +++ b/tests/script/tsim/compress/commitlog.sim @@ -25,7 +25,7 @@ while $count < $N endw sql select * from $tb -if $rows != $N then +if $rows != $N then return -1 endi @@ -46,7 +46,7 @@ while $count < $N endw sql select * from $tb -if $rows != $N then +if $rows != $N then return -1 endi @@ -67,7 +67,7 @@ while $count < $N endw sql select * from $tb -if $rows != $N then +if $rows != $N then return -1 endi @@ -83,7 +83,7 @@ $tb = $tbPrefix . $i sql use $db sql select * from $tb print select * from $tb ==> $rows points -if $rows != $N then +if $rows != $N then return -1 endi @@ -93,18 +93,18 @@ $tb = $tbPrefix . $i sql use $db sql select * from $tb print select * from $tb ==> $rows points -if $rows != $N then +if $rows != $N then return -1 endi - + $i = 2 $db = $dbPrefix . $i $tb = $tbPrefix . $i sql use $db sql select * from $tb print select * from $tb ==> $rows points -if $rows != $N then +if $rows != $N then return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compress/compress.sim b/tests/script/tsim/compress/compress.sim index 766f97450c3b9469b2788fb12cb5099d91cc4163..4752f1ba50460901f98ec08bd97baba7b216b187 100644 --- a/tests/script/tsim/compress/compress.sim +++ b/tests/script/tsim/compress/compress.sim @@ -25,7 +25,7 @@ while $count < $N endw sql select * from $tb -if $rows != $N then +if $rows != $N then return -1 endi @@ -47,7 +47,7 @@ while $count < $N endw sql select * from $tb -if $rows != $N then +if $rows != $N then return -1 endi @@ -69,7 +69,7 @@ while $count < $N endw sql select * from $tb -if $rows != $N then +if $rows != $N then return -1 endi @@ -85,7 +85,7 @@ $tb = $tbPrefix . $i sql use $db sql select * from $tb print select * from $tb ==> $rows points -if $rows != $N then +if $rows != $N then return -1 endi @@ -95,18 +95,18 @@ $tb = $tbPrefix . $i sql use $db sql select * from $tb print select * from $tb ==> $rows points -if $rows != $N then +if $rows != $N then return -1 endi - + $i = 2 $db = $dbPrefix . $i $tb = $tbPrefix . $i sql use $db sql select * from $tb print select * from $tb ==> $rows points -if $rows != $N then +if $rows != $N then return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compress/compress2.sim b/tests/script/tsim/compress/compress2.sim index 87e50cce5bd273abcf82bf960adee78b96bac774..c55b74f2466c8d24e87c693b23321df5a372704c 100644 --- a/tests/script/tsim/compress/compress2.sim +++ b/tests/script/tsim/compress/compress2.sim @@ -26,7 +26,7 @@ while $count < $N endw sql select * from $tb -if $rows != $N then +if $rows != $N then return -1 endi @@ -48,7 +48,7 @@ while $count < $N endw sql select * from $tb -if $rows != $N then +if $rows != $N then return -1 endi @@ -70,7 +70,7 @@ while $count < $N endw sql select * from $tb -if $rows != $N then +if $rows != $N then return -1 endi @@ -86,7 +86,7 @@ $tb = $tbPrefix . $i sql use $db sql select * from $tb print select * from $tb ==> $rows points -if $rows != $N then +if $rows != $N then return -1 endi @@ -96,18 +96,18 @@ $tb = $tbPrefix . $i sql use $db sql select * from $tb print select * from $tb ==> $rows points -if $rows != $N then +if $rows != $N then return -1 endi - + $i = 2 $db = $dbPrefix . $i $tb = $tbPrefix . $i sql use $db sql select * from $tb print select * from $tb ==> $rows points -if $rows != $N then +if $rows != $N then return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compress/uncompress.sim b/tests/script/tsim/compress/uncompress.sim index ccd5db4b0cf473d763a761587830212729cf9810..f48fc6da2321f51742dd971c5e9ae05fb26cb827 100644 --- a/tests/script/tsim/compress/uncompress.sim +++ b/tests/script/tsim/compress/uncompress.sim @@ -26,7 +26,7 @@ while $count < $N endw sql select * from $tb -if $rows != $N then +if $rows != $N then return -1 endi @@ -48,7 +48,7 @@ while $count < $N endw sql select * from $tb -if $rows != $N then +if $rows != $N then return -1 endi @@ -70,7 +70,7 @@ while $count < $N endw sql select * from $tb -if $rows != $N then +if $rows != $N then return -1 endi @@ -85,7 +85,7 @@ $tb = $tbPrefix . $i sql use $db sql select * from $tb print select * from $tb ==> $rows points -if $rows != $N then +if $rows != $N then return -1 endi @@ -95,18 +95,18 @@ $tb = $tbPrefix . $i sql use $db sql select * from $tb print select * from $tb ==> $rows points -if $rows != $N then +if $rows != $N then return -1 endi - + $i = 2 $db = $dbPrefix . $i $tb = $tbPrefix . $i sql use $db sql select * from $tb print select * from $tb ==> $rows points -if $rows != $N then +if $rows != $N then return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compute/avg.sim b/tests/script/tsim/compute/avg.sim index 41a3a4825165d0c92c6215cdfd15c646c4204dc3..2f7e9b83b09287067a61fc1c1cd2cac889543648 100644 --- a/tests/script/tsim/compute/avg.sim +++ b/tests/script/tsim/compute/avg.sim @@ -25,18 +25,18 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -44,7 +44,7 @@ $tb = $tbPrefix . $i sql select avg(tbcol) from $tb print ===> $data00 -if $data00 != 9.500000000 then +if $data00 != 9.500000000 then return -1 endi @@ -53,27 +53,27 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select avg(tbcol) from $tb where ts <= $ms print ===> $data00 -if $data00 != 2.000000000 then +if $data00 != 2.000000000 then return -1 endi print =============== step4 sql select avg(tbcol) as b from $tb print ===> $data00 -if $data00 != 9.500000000 then +if $data00 != 9.500000000 then return -1 endi print =============== step5 sql select avg(tbcol) as b from $tb interval(1m) print ===> $data01 -if $data10 != 1.000000000 then +if $data10 != 1.000000000 then return -1 endi sql select avg(tbcol) as b from $tb interval(1d) print ===> $data01 -if $data00 != 9.500000000 then +if $data00 != 9.500000000 then return -1 endi @@ -82,17 +82,17 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select avg(tbcol) as b from $tb where ts <= $ms interval(1m) print ===> $data01 -if $data40 != 4.000000000 then +if $data40 != 4.000000000 then return -1 endi -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step7 sql select avg(tbcol) from $mt print ===> $data00 -if $data00 != 9.500000000 then +if $data00 != 9.500000000 then return -1 endi @@ -101,13 +101,13 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select avg(tbcol) as c from $mt where ts <= $ms print ===> $data00 -if $data00 != 2.000000000 then +if $data00 != 2.000000000 then return -1 endi sql select avg(tbcol) as c from $mt where tgcol < 5 print ===> $data00 -if $data00 != 9.500000000 then +if $data00 != 9.500000000 then return -1 endi @@ -115,31 +115,31 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select avg(tbcol) as c from $mt where tgcol < 5 and ts <= $ms print ===> $data00 -if $data00 != 2.000000000 then +if $data00 != 2.000000000 then return -1 endi print =============== step9 sql select avg(tbcol) as b from $mt interval(1m) print ===> $data10 -if $data10 != 1.000000000 then +if $data10 != 1.000000000 then return -1 endi sql select avg(tbcol) as b from $mt interval(1d) print ===> $data01 -if $data00 != 9.500000000 then +if $data00 != 9.500000000 then return -1 endi print =============== step10 sql select avg(tbcol) as b from $mt group by tgcol print ===> $data00 -if $data00 != 9.500000000 then +if $data00 != 9.500000000 then return -1 endi -if $rows != $tbNum then +if $rows != $tbNum then return -1 endi @@ -148,18 +148,18 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select avg(tbcol) as b from $mt where ts <= $ms partition by tgcol interval(1m) print ===> $data10 -if $data10 != 1.000000000 then +if $data10 != 1.000000000 then return -1 endi -if $rows != 50 then +if $rows != 50 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compute/block_dist.sim b/tests/script/tsim/compute/block_dist.sim index ad3357515302eec632f21a99ca8e7768d702441b..2d0a4e890267af3520e8c99f21a4d87c5bc1ceef 100644 --- a/tests/script/tsim/compute/block_dist.sim +++ b/tests/script/tsim/compute/block_dist.sim @@ -27,25 +27,25 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw sql create table $nt (ts timestamp, tbcol int) $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - sql insert into $nt values ($ms , $x ) - $x = $x + 1 + sql insert into $nt values ($ms , $x ) + $x = $x + 1 endw sql flush database $db @@ -94,7 +94,7 @@ sql_error select _block_dist() from (select * from $mt) print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi diff --git a/tests/script/tsim/compute/bottom.sim b/tests/script/tsim/compute/bottom.sim index 141d7f314bb6347a5fae974be84df0c4e912a523..4ccaaf84129af40114d7b024b267d9d03f829782 100644 --- a/tests/script/tsim/compute/bottom.sim +++ b/tests/script/tsim/compute/bottom.sim @@ -25,18 +25,18 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -44,7 +44,7 @@ $tb = $tbPrefix . $i sql select bottom(tbcol, 1) from $tb print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi @@ -53,24 +53,24 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select bottom(tbcol, 1) from $tb where ts > $ms print ===> $data00 -if $data00 != 5 then +if $data00 != 5 then return -1 endi print =============== step4 sql select bottom(tbcol, 1) as b from $tb print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi print =============== step5 -sql select bottom(tbcol, 2) as b from $tb +sql select bottom(tbcol, 2) as b from $tb print ===> $data00 $data10 -if $data00 != 1 then +if $data00 != 1 then return -1 endi -if $data10 != 0 then +if $data10 != 0 then return -1 endi @@ -79,10 +79,10 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select bottom(tbcol, 2) as b from $tb where ts > $ms print ===> $data00 $data10 -if $data00 != 6 then +if $data00 != 6 then return -1 endi -if $data10 != 5 then +if $data10 != 5 then return -1 endi @@ -93,8 +93,8 @@ step6: print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compute/count.sim b/tests/script/tsim/compute/count.sim index ae8a85155931c85b78ad21d0248c7d789ed0f647..44038d5195100912b75b323d38163ae54429c1d4 100644 --- a/tests/script/tsim/compute/count.sim +++ b/tests/script/tsim/compute/count.sim @@ -25,18 +25,18 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -44,19 +44,19 @@ $tb = $tbPrefix . $i sql select count(*) from $tb print ===> select count(*) from $tb => $data00 -if $data00 != $rowNum then +if $data00 != $rowNum then return -1 endi sql select count(1) from $tb print ===> select count(1) from $tb => $data00 -if $data00 != $rowNum then +if $data00 != $rowNum then return -1 endi sql select count(tbcol) from $tb print ===> $data00 -if $data00 != $rowNum then +if $data00 != $rowNum then return -1 endi @@ -65,27 +65,27 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select count(tbcol) from $tb where ts <= $ms print ===> $data00 -if $data00 != 5 then +if $data00 != 5 then return -1 endi print =============== step4 sql select count(tbcol) as b from $tb print ===> $data00 -if $data00 != $rowNum then +if $data00 != $rowNum then return -1 endi print =============== step5 sql select count(tbcol) as b from $tb interval(1m) print ===> $data00 -if $data00 != 1 then +if $data00 != 1 then return -1 endi sql select count(tbcol) as b from $tb interval(1d) print ===> $data00 -if $data00 != $rowNum then +if $data00 != $rowNum then return -1 endi @@ -94,30 +94,30 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select count(tbcol) as b from $tb where ts <= $ms interval(1m) print ===> $data00 -if $data00 != 1 then +if $data00 != 1 then return -1 endi -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step7 sql select count(*) from $mt print ===> $data00 -if $data00 != $totalNum then +if $data00 != $totalNum then return -1 endi print =============== step8 sql select count(1) from $mt print ===> $data00 -if $data00 != $totalNum then +if $data00 != $totalNum then return -1 endi sql select count(tbcol) from $mt print ===> $data00 -if $data00 != $totalNum then +if $data00 != $totalNum then return -1 endi @@ -126,13 +126,13 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select count(tbcol) as c from $mt where ts <= $ms print ===> $data00 -if $data00 != 50 then +if $data00 != 50 then return -1 endi sql select count(tbcol) as c from $mt where tgcol < 5 print ===> $data00 -if $data00 != 100 then +if $data00 != 100 then return -1 endi @@ -140,34 +140,34 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select count(tbcol) as c from $mt where tgcol < 5 and ts <= $ms print ===> $data00 -if $data00 != 25 then +if $data00 != 25 then return -1 endi print =============== step9 sql select count(tbcol) as b from $mt interval(1m) print ===> $data00 -if $data00 != 10 then +if $data00 != 10 then return -1 endi -if $data10 != 10 then +if $data10 != 10 then return -1 endi sql select count(tbcol) as b from $mt interval(1d) print ===> $data00 -if $data00 != 200 then +if $data00 != 200 then return -1 endi print =============== step10 sql select count(tbcol) as b from $mt group by tgcol print ===> $data00 -if $data00 != $rowNum then +if $data00 != $rowNum then return -1 endi -if $rows != $tbNum then +if $rows != $tbNum then return -1 endi @@ -176,17 +176,17 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select count(tbcol) as b from $mt where ts <= $ms partition by tgcol interval(1m) print ===> $data01 -if $data00 != 1 then +if $data00 != 1 then return -1 endi -if $rows != 50 then +if $rows != 50 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi diff --git a/tests/script/tsim/compute/diff.sim b/tests/script/tsim/compute/diff.sim index 0882b835c8ff7be7a58d160fc7871b64a3b6c091..7e69f40b9722bc015b354fb35bdeceb8affbf807 100644 --- a/tests/script/tsim/compute/diff.sim +++ b/tests/script/tsim/compute/diff.sim @@ -25,17 +25,17 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -43,7 +43,7 @@ $tb = $tbPrefix . $i sql select diff(tbcol) from $tb print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi @@ -52,7 +52,7 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select diff(tbcol) from $tb where ts > $ms print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi @@ -60,14 +60,14 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select diff(tbcol) from $tb where ts <= $ms print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi print =============== step4 sql select diff(tbcol) as b from $tb print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi @@ -86,8 +86,8 @@ step6: print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compute/diff2.sim b/tests/script/tsim/compute/diff2.sim index bd8a1223be438eb3c8da6c1d9d9861cdc9cb28e9..1cc2a87839f84f2b742fbb0667c373ad1923ea15 100644 --- a/tests/script/tsim/compute/diff2.sim +++ b/tests/script/tsim/compute/diff2.sim @@ -26,19 +26,19 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc $tinyint = $x / 128 - sql insert into $tb values ($ms , $x , $x , $x , $x , $tinyint , $x , $x , $x , $x ) + sql insert into $tb values ($ms , $x , $x , $x , $x , $tinyint , $x , $x , $x , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -46,7 +46,7 @@ $tb = $tbPrefix . $i sql select diff(c1) from $tb print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi sql select diff(c2) from $tb @@ -56,22 +56,22 @@ if $data10 != 1.000000000 then endi sql select diff(c3) from $tb print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi sql select diff(c4) from $tb print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi sql select diff(c5) from $tb print ===> $data10 -if $data10 != 0 then +if $data10 != 0 then return -1 endi sql select diff(c6) from $tb print ===> $data10 -if $data10 != 1.000000000 then +if $data10 != 1.000000000 then return -1 endi @@ -90,7 +90,7 @@ sql_error select diff(c1) from m_di_tb1 where c2 like '2%' print =============== step3 sql select diff(c1) from $tb where c1 > 5 print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi sql select diff(c2) from $tb where c2 > 5 @@ -100,38 +100,38 @@ if $data10 != 1.000000000 then endi sql select diff(c3) from $tb where c3 > 5 print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi sql select diff(c4) from $tb where c4 > 5 print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi sql select diff(c5) from $tb where c5 > 5 print ===> $data10 -if $data10 != 0 then +if $data10 != 0 then return -1 endi sql select diff(c6) from $tb where c6 > 5 print ===> $data10 -if $data10 != 1.000000000 then +if $data10 != 1.000000000 then return -1 endi print =============== step4 sql select diff(c1) from $tb where c1 > 5 and c2 < $rowNum print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi sql select diff(c1) from $tb where c9 like '%9' and c1 <= 20 -if $rows != 1 then +if $rows != 1 then return -1 endi print ===> $data10 -if $data00 != 10 then +if $data00 != 10 then return -1 endi @@ -148,8 +148,8 @@ step6: print =============== clear #sql drop database $db #sql select * from information_schema.ins_databases -#if $rows != 2 then +#if $rows != 2 then # return -1 #endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compute/first.sim b/tests/script/tsim/compute/first.sim index 8595416c0740b31c34ac176d0b022b485d8c9515..954664a4c683e0a10a0e8cd6de799514cb34ba44 100644 --- a/tests/script/tsim/compute/first.sim +++ b/tests/script/tsim/compute/first.sim @@ -25,18 +25,18 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -44,7 +44,7 @@ $tb = $tbPrefix . $i sql select first(tbcol) from $tb print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi @@ -53,27 +53,27 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select first(tbcol) from $tb where ts <= $ms print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi print =============== step4 sql select first(tbcol) as b from $tb print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi print =============== step5 sql select first(tbcol) as b from $tb interval(1m) print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi sql select first(tbcol) as b from $tb interval(1d) print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi @@ -82,17 +82,17 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select first(tbcol) as b from $tb where ts <= $ms interval(1m) print ===> $data00 -if $data40 != 4 then +if $data40 != 4 then return -1 endi -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step7 sql select first(tbcol) from $mt print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi @@ -101,13 +101,13 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select first(tbcol) as c from $mt where ts <= $ms print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi sql select first(tbcol) as c from $mt where tgcol < 5 print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi @@ -115,7 +115,7 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select first(tbcol) as c from $mt where tgcol < 5 and ts <= $ms print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi @@ -123,24 +123,24 @@ print =============== step9 sql select first(tbcol) as b from $mt interval(1m) print select first(tbcol) as b from $mt interval(1m) print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi sql select first(tbcol) as b from $mt interval(1d) print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi print =============== step10 sql select first(tbcol) as b from $mt group by tgcol print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi -if $rows != $tbNum then +if $rows != $tbNum then return -1 endi @@ -149,19 +149,19 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select first(tbcol) as b from $mt where ts <= $ms partition by tgcol interval(1m) print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi print ===> $rows -if $rows != 50 then +if $rows != 50 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compute/interval.sim b/tests/script/tsim/compute/interval.sim index 903e80769b84c723ae21a9d20e6174a1fc54f97a..dc11c20ec925be39d12d2a7d1e92bbcb1da830b1 100644 --- a/tests/script/tsim/compute/interval.sim +++ b/tests/script/tsim/compute/interval.sim @@ -25,18 +25,18 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -44,13 +44,13 @@ $tb = $tbPrefix . $i sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $tb interval(1m) print ===> $rows -if $rows < $rowNum then +if $rows < $rowNum then return -1 endi -if $data00 != 1 then +if $data00 != 1 then return -1 endi -if $data04 != 1 then +if $data04 != 1 then return -1 endi @@ -59,16 +59,16 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $tb where ts <= $ms interval(1m) print ===> $rows -if $rows > 10 then +if $rows > 10 then return -1 endi -if $rows < 3 then +if $rows < 3 then return -1 endi -if $data00 != 1 then +if $data00 != 1 then return -1 endi -if $data04 != 1 then +if $data04 != 1 then return -1 endi @@ -81,16 +81,16 @@ $ms2 = 1601481600000 - $cc sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $tb where ts <= $ms and ts > $ms2 interval(1m) print ===> $rows -if $rows < 18 then +if $rows < 18 then return -1 endi -if $rows > 22 then +if $rows > 22 then return -1 endi -if $data00 != 1 then +if $data00 != 1 then return -1 endi -if $data04 != 1 then +if $data04 != 1 then return -1 endi @@ -101,35 +101,35 @@ $ms = 1601481600000 + $cc $cc = 1 * 60000 $ms2 = 1601481600000 - $cc -sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $tb where ts <= $ms and ts > $ms2 interval(1m) fill(value,0) +sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $tb where ts <= $ms and ts > $ms2 interval(1m) fill(value,0,0,0,0,0) print ===> $rows if $rows < 30 then print expect greater than 30, actual: $rows return -1 endi -if $rows > 50 then +if $rows > 50 then return -1 endi -if $data20 != 1 then +if $data20 != 1 then return -1 endi -if $data24 != 1 then +if $data24 != 1 then return -1 endi print =============== step6 sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $mt interval(1m) print ===> $rows -if $rows < 18 then +if $rows < 18 then return -1 endi -if $rows > 22 then +if $rows > 22 then return -1 endi -if $data10 > 15 then +if $data10 > 15 then return -1 endi -if $data10 < 5 then +if $data10 < 5 then return -1 endi @@ -138,16 +138,16 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $mt where ts <= $ms interval(1m) print ===> $rows -if $rows < 3 then +if $rows < 3 then return -1 endi -if $rows > 7 then +if $rows > 7 then return -1 endi -if $data10 > 15 then +if $data10 > 15 then return -1 endi -if $data10 < 5 then +if $data10 < 5 then return -1 endi @@ -160,16 +160,16 @@ $ms2 = 1601481600000 - $cc sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $mt where ts <= $ms1 and ts > $ms2 interval(1m) print ===> $rows -if $rows < 18 then +if $rows < 18 then return -1 endi -if $rows > 22 then +if $rows > 22 then return -1 endi -if $data10 > 15 then +if $data10 > 15 then return -1 endi -if $data10 < 5 then +if $data10 < 5 then return -1 endi @@ -180,25 +180,25 @@ $ms1 = 1601481600000 + $cc $cc = 1 * 60000 $ms2 = 1601481600000 - $cc -sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $mt where ts <= $ms1 and ts > $ms2 interval(1m) fill(value, 0) -if $rows < 30 then +sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $mt where ts <= $ms1 and ts > $ms2 interval(1m) fill(value, 0,0,0,0,0) +if $rows < 30 then return -1 endi -if $rows > 50 then +if $rows > 50 then return -1 endi -if $data10 > 15 then +if $data10 > 15 then return -1 endi -if $data10 < 5 then +if $data10 < 5 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compute/last.sim b/tests/script/tsim/compute/last.sim index be2ee47733564bbbbb056dc28d8fc69eaa886ae6..e57236e57c4c0bc5ebccacfd2ccd9bb309b6bb8f 100644 --- a/tests/script/tsim/compute/last.sim +++ b/tests/script/tsim/compute/last.sim @@ -25,18 +25,18 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -44,7 +44,7 @@ $tb = $tbPrefix . $i sql select last(tbcol) from $tb print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi @@ -54,27 +54,27 @@ $ms = 1601481600000 + $cc sql select last(tbcol) from $tb where ts <= $ms print ===> $data00 -if $data00 != 4 then +if $data00 != 4 then return -1 endi print =============== step4 sql select last(tbcol) as b from $tb print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi print =============== step5 sql select last(tbcol) as b from $tb interval(1m) print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi sql select last(tbcol) as b from $tb interval(1d) print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi @@ -84,17 +84,17 @@ $ms = 1601481600000 + $cc sql select last(tbcol) as b from $tb where ts <= $ms interval(1m) print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step7 sql select last(tbcol) from $mt print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi @@ -104,13 +104,13 @@ $ms = 1601481600000 + $cc sql select last(tbcol) as c from $mt where ts <= $ms print ===> $data00 -if $data00 != 4 then +if $data00 != 4 then return -1 endi sql select last(tbcol) as c from $mt where tgcol < 5 print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi @@ -119,31 +119,31 @@ $ms = 1601481600000 + $cc sql select last(tbcol) as c from $mt where tgcol < 5 and ts <= $ms print ===> $data00 -if $data00 != 4 then +if $data00 != 4 then return -1 endi print =============== step9 sql select last(tbcol) as b from $mt interval(1m) print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi sql select last(tbcol) as b from $mt interval(1d) print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi print =============== step10 sql select last(tbcol) as b from $mt group by tgcol print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi -if $rows != $tbNum then +if $rows != $tbNum then return -1 endi @@ -153,19 +153,19 @@ $ms = 1601481600000 + $cc sql select last(tbcol) as b from $mt where ts <= $ms partition by tgcol interval(1m) print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi print ===> $rows -if $rows != 50 then +if $rows != 50 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compute/last_row.sim b/tests/script/tsim/compute/last_row.sim index 57bdc36f6db66a1709a76a76d3e577c5d0f6d134..2e060dc28526dba7f0a692046b9d987be861cebb 100644 --- a/tests/script/tsim/compute/last_row.sim +++ b/tests/script/tsim/compute/last_row.sim @@ -25,18 +25,18 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -44,7 +44,7 @@ $tb = $tbPrefix . $i sql select last_row(tbcol) from $tb print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi @@ -54,21 +54,21 @@ $ms = 1601481600000 + $cc print select last_row(tbcol) from $tb where ts <= $ms sql select last_row(tbcol) from $tb where ts <= $ms print ===> $data00 -if $data00 != 4 then +if $data00 != 4 then return -1 endi print =============== step4 sql select last_row(tbcol) as b from $tb print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi print =============== step7 sql select last_row(tbcol) from $mt print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi @@ -77,13 +77,13 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select last_row(tbcol) as c from $mt where ts <= $ms print ===> $data00 -if $data00 != 4 then +if $data00 != 4 then return -1 endi sql select last_row(tbcol) as c from $mt where tgcol < 5 print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi @@ -91,18 +91,18 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select last_row(tbcol) as c from $mt where tgcol < 5 and ts <= $ms print ===> $data00 -if $data00 != 4 then +if $data00 != 4 then return -1 endi print =============== step10 sql select last_row(tbcol) as b from $mt group by tgcol print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi -if $rows != $tbNum then +if $rows != $tbNum then return -1 endi @@ -110,28 +110,28 @@ print =============== step11 $cc = 1 * 3600000 $ms = 1601481600000 + $cc -sql insert into $tb values( $ms , 10) +sql insert into $tb values( $ms , 10) $cc = 3 * 3600000 $ms = 1601481600000 + $cc -sql insert into $tb values( $ms , null) +sql insert into $tb values( $ms , null) $cc = 5 * 3600000 $ms = 1601481600000 + $cc -sql insert into $tb values( $ms , -1) +sql insert into $tb values( $ms , -1) $cc = 7 * 3600000 $ms = 1601481600000 + $cc -sql insert into $tb values( $ms , null) +sql insert into $tb values( $ms , null) ## for super table $cc = 6 * 3600000 $ms = 1601481600000 + $cc sql select last_row(*) from $mt where ts < $ms -if $data01 != -1 then +if $data01 != -1 then return -1 endi @@ -139,12 +139,12 @@ $cc = 8 * 3600000 $ms = 1601481600000 + $cc sql select last_row(*) from $mt where ts < $ms -if $data01 != NULL then +if $data01 != NULL then return -1 endi sql select last_row(*) from $mt -if $data01 != NULL then +if $data01 != NULL then return -1 endi @@ -152,7 +152,7 @@ $cc = 4 * 3600000 $ms = 1601481600000 + $cc sql select last_row(*) from $mt where ts < $ms -if $data01 != NULL then +if $data01 != NULL then return -1 endi @@ -162,7 +162,7 @@ $cc = 4 * 3600000 $ms2 = 1601481600000 + $cc sql select last_row(*) from $mt where ts > $ms1 and ts <= $ms2 -if $data01 != NULL then +if $data01 != NULL then return -1 endi @@ -171,7 +171,7 @@ $cc = 6 * 3600000 $ms = 1601481600000 + $cc sql select last_row(*) from $tb where ts <= $ms -if $data01 != -1 then +if $data01 != -1 then return -1 endi @@ -179,12 +179,12 @@ $cc = 8 * 3600000 $ms = 1601481600000 + $cc sql select last_row(*) from $tb where ts <= $ms -if $data01 != NULL then +if $data01 != NULL then return -1 endi sql select last_row(*) from $tb -if $data01 != NULL then +if $data01 != NULL then return -1 endi @@ -192,7 +192,7 @@ $cc = 4 * 3600000 $ms = 1601481600000 + $cc sql select last_row(*) from $tb where ts <= $ms -if $data01 != NULL then +if $data01 != NULL then return -1 endi @@ -202,14 +202,14 @@ $cc = 4 * 3600000 $ms2 = 1601481600000 + $cc sql select last_row(*) from $tb where ts > $ms1 and ts <= $ms2 -if $data01 != NULL then +if $data01 != NULL then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi diff --git a/tests/script/tsim/compute/leastsquare.sim b/tests/script/tsim/compute/leastsquare.sim index 0ead02da56570807fbfcdaa96c12aabbcbc26049..cde3c01214f3ee405578fb9961f8ae02cd8e4483 100644 --- a/tests/script/tsim/compute/leastsquare.sim +++ b/tests/script/tsim/compute/leastsquare.sim @@ -25,17 +25,17 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 2 $ms = 1000 while $x < $rowNum $ms = $ms + 1000 - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -63,7 +63,7 @@ endi print =============== step5 sql select leastsquares(tbcol, 1, 1) as b from $tb interval(1m) -print ===> $data00 +print ===> $data00 if $data00 != @{slop:1.000000, intercept:1.000000}@ then return -1 endi @@ -81,15 +81,15 @@ if $data00 != @{slop:1.000000, intercept:1.000000}@ then return -1 endi print ===> $rows -if $rows != 1 then +if $rows != 1 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compute/max.sim b/tests/script/tsim/compute/max.sim index 21bca6be089f94924fc357ba89cfee05640e4b3e..451eeaacd81bf576ae4dd43d2aed5bd203cafc71 100644 --- a/tests/script/tsim/compute/max.sim +++ b/tests/script/tsim/compute/max.sim @@ -25,18 +25,18 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - - sql insert into $tb values ($ms , $x ) + + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -44,7 +44,7 @@ $tb = $tbPrefix . $i sql select max(tbcol) from $tb print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi @@ -54,27 +54,27 @@ $ms = 1601481600000 + $cc sql select max(tbcol) from $tb where ts <= $ms print ===> $data00 -if $data00 != 4 then +if $data00 != 4 then return -1 endi print =============== step4 sql select max(tbcol) as b from $tb print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi print =============== step5 sql select max(tbcol) as b from $tb interval(1m) print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi sql select max(tbcol) as b from $tb interval(1d) print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi @@ -84,17 +84,17 @@ $ms = 1601481600000 + $cc sql select max(tbcol) as b from $tb where ts <= $ms interval(1m) print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step7 sql select max(tbcol) from $mt print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi @@ -104,13 +104,13 @@ $ms = 1601481600000 + $cc sql select max(tbcol) as c from $mt where ts <= $ms print ===> $data00 -if $data00 != 4 then +if $data00 != 4 then return -1 endi sql select max(tbcol) as c from $mt where tgcol < 5 print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi @@ -119,31 +119,31 @@ $ms = 1601481600000 + $cc sql select max(tbcol) as c from $mt where tgcol < 5 and ts <= $ms print ===> $data00 -if $data00 != 4 then +if $data00 != 4 then return -1 endi print =============== step9 sql select max(tbcol) as b from $mt interval(1m) print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi sql select max(tbcol) as b from $mt interval(1d) print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi print =============== step10 sql select max(tbcol) as b from $mt group by tgcol print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi -if $rows != $tbNum then +if $rows != $tbNum then return -1 endi @@ -153,19 +153,19 @@ $ms = 1601481600000 + $cc sql select max(tbcol) as b from $mt where ts <= $ms partition by tgcol interval(1m) print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi print ===> $rows -if $rows != 50 then +if $rows != 50 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compute/min.sim b/tests/script/tsim/compute/min.sim index cf22b6f2be25799b99f81634d4d0f5ae0aec9532..e8ec3232ba01fe0e1ddcf9acba695cab63504bc2 100644 --- a/tests/script/tsim/compute/min.sim +++ b/tests/script/tsim/compute/min.sim @@ -25,18 +25,18 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -44,7 +44,7 @@ $tb = $tbPrefix . $i sql select min(tbcol) from $tb print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi @@ -54,27 +54,27 @@ $ms = 1601481600000 + $cc sql select min(tbcol) from $tb where ts < $ms print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi print =============== step4 sql select min(tbcol) as b from $tb print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi print =============== step5 sql select min(tbcol) as b from $tb interval(1m) print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi sql select min(tbcol) as b from $tb interval(1d) print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi @@ -85,17 +85,17 @@ $ms = 1601481600000 + $cc sql select min(tbcol) as b from $tb where ts <= $ms interval(1m) print select min(tbcol) as b from $tb where ts <= $ms interval(1m) print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step7 sql select min(tbcol) from $mt print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi @@ -104,13 +104,13 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select min(tbcol) as c from $mt where ts < $ms print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi sql select min(tbcol) as c from $mt where tgcol < 5 print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi @@ -118,31 +118,31 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select min(tbcol) as c from $mt where tgcol < 5 and ts <= $ms print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi print =============== step9 sql select min(tbcol) as b from $mt interval(1m) print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi sql select min(tbcol) as b from $mt interval(1d) print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi print =============== step10 sql select min(tbcol) as b from $mt group by tgcol print ===> $data00 -if $data00 != 0 then +if $data00 != 0 then return -1 endi -if $rows != $tbNum then +if $rows != $tbNum then return -1 endi @@ -151,19 +151,19 @@ $cc = 4 * 60000 $ms = 1601481600000 + $cc sql select min(tbcol) as b from $mt where ts <= $ms partition by tgcol interval(1m) print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi print ===> $rows -if $rows != 50 then +if $rows != 50 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compute/null.sim b/tests/script/tsim/compute/null.sim index 2dbf61bb079a52388a028368385f9c3e303cacf0..d841f25265c1e569539731808a22acbf88d83140 100644 --- a/tests/script/tsim/compute/null.sim +++ b/tests/script/tsim/compute/null.sim @@ -25,24 +25,24 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - - $v1 = $x + + $v1 = $x $v2 = $x if $x == 0 then $v1 = NULL endi - - sql insert into $tb values ($ms , $v1 , $v2 ) + + sql insert into $tb values ($ms , $v1 , $v2 ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -61,47 +61,47 @@ if $rows != 20 then endi print =============== step3 -sql select count(tbcol), count(tbcol2), avg(tbcol), avg(tbcol2), sum(tbcol), sum(tbcol2) from $tb -print ===> $data00 $data01 $data02 $data03 $data04 $data05 -if $data00 != 19 then +sql select count(tbcol), count(tbcol2), avg(tbcol), avg(tbcol2), sum(tbcol), sum(tbcol2) from $tb +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +if $data00 != 19 then return -1 endi -if $data01 != 20 then +if $data01 != 20 then return -1 endi -if $data02 != 10.000000000 then +if $data02 != 10.000000000 then return -1 endi -if $data03 != 9.500000000 then +if $data03 != 9.500000000 then return -1 endi -if $data04 != 190 then +if $data04 != 190 then return -1 endi -if $data05 != 190 then +if $data05 != 190 then return -1 endi print =============== step4 sql select * from $tb where tbcol2 = 19 print ===> $data01 $data02 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 19 then +if $data01 != 19 then return -1 endi -if $data02 != 19 then +if $data02 != 19 then return -1 endi -sql select * from $tb where tbcol is NULL -if $rows != 1 then +sql select * from $tb where tbcol is NULL +if $rows != 1 then return -1 endi -sql select * from $tb where tbcol = NULL -if $rows != 0 then +sql select * from $tb where tbcol = NULL +if $rows != 0 then return -1 endi @@ -113,29 +113,29 @@ sql create table tt using $mt tags( NULL ) #step52: sql select * from $mt where tgcol is NULL -if $rows != 0 then +if $rows != 0 then return -1 endi print =============== step6 -sql select count(tbcol), count(tbcol2), avg(tbcol), avg(tbcol2), sum(tbcol), sum(tbcol2) from $mt -print ===> $data00 $data01 $data02 $data03 $data04 $data05 -if $data00 != 190 then +sql select count(tbcol), count(tbcol2), avg(tbcol), avg(tbcol2), sum(tbcol), sum(tbcol2) from $mt +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +if $data00 != 190 then return -1 endi -if $data01 != 200 then +if $data01 != 200 then return -1 endi -if $data02 != 10.000000000 then +if $data02 != 10.000000000 then return -1 endi -if $data03 != 9.500000000 then +if $data03 != 9.500000000 then return -1 endi -if $data04 != 1900 then +if $data04 != 1900 then return -1 endi -if $data05 != 1900 then +if $data05 != 1900 then return -1 endi @@ -158,15 +158,15 @@ sql insert into t7 values(now, NULL) #sql insert into t8 values(now, NULL) #sql select * from t1 -#if $rows != 1 then +#if $rows != 1 then # return -1 #endi -#if $data01 != NULL then +#if $data01 != NULL then # return -1 #endi sql select * from t2 -if $rows != 1 then +if $rows != 1 then return -1 endi if $data01 != NULL then @@ -174,7 +174,7 @@ if $data01 != NULL then endi sql select * from t3 -if $rows != 1 then +if $rows != 1 then return -1 endi if $data01 != NULL then @@ -182,7 +182,7 @@ if $data01 != NULL then endi sql select * from t4 -if $rows != 1 then +if $rows != 1 then return -1 endi if $data01 != NULL then @@ -190,7 +190,7 @@ if $data01 != NULL then endi sql select * from t5 -if $rows != 1 then +if $rows != 1 then return -1 endi if $data01 != NULL then @@ -198,7 +198,7 @@ if $data01 != NULL then endi sql select * from t6 -if $rows != 1 then +if $rows != 1 then return -1 endi if $data01 != NULL then @@ -206,7 +206,7 @@ if $data01 != NULL then endi sql select * from t7 -if $rows != 1 then +if $rows != 1 then return -1 endi if $data01 != NULL then @@ -214,7 +214,7 @@ if $data01 != NULL then endi #sql select * from t8 -#if $rows != 1 then +#if $rows != 1 then # return -1 #endi #if $data01 != NULL then @@ -224,8 +224,8 @@ endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compute/percentile.sim b/tests/script/tsim/compute/percentile.sim index 1ea82a998bcd9c1d4ede0db815c4bebd948a3ea6..836a6baed5c2e3551c50eddb2ca01bca831a1a5c 100644 --- a/tests/script/tsim/compute/percentile.sim +++ b/tests/script/tsim/compute/percentile.sim @@ -25,18 +25,18 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -44,19 +44,19 @@ $tb = $tbPrefix . $i sql select percentile(tbcol, 10) from $tb print ===> $data00 -if $data00 != 1.900000000 then +if $data00 != 1.900000000 then return -1 endi sql select percentile(tbcol, 20) from $tb print ===> $data00 -if $data00 != 3.800000000 then +if $data00 != 3.800000000 then return -1 endi sql select percentile(tbcol, 100) from $tb print ===> $data00 -if $data00 != 19.000000000 then +if $data00 != 19.000000000 then return -1 endi @@ -70,7 +70,7 @@ $ms = 1601481600000 + $cc sql select percentile(tbcol, 1) from $tb where ts > $ms print ===> $data00 -if $data00 != 5.140000000 then +if $data00 != 5.140000000 then return -1 endi @@ -79,7 +79,7 @@ $ms = 1601481600000 + $cc sql select percentile(tbcol, 5) from $tb where ts > $ms print ===> $data00 -if $data00 != 5.700000000 then +if $data00 != 5.700000000 then return -1 endi @@ -88,7 +88,7 @@ $ms = 1601481600000 + $cc sql select percentile(tbcol, 0) from $tb where ts > $ms print ===> $data00 -if $data00 != 5.000000000 then +if $data00 != 5.000000000 then return -1 endi @@ -98,7 +98,7 @@ $ms = 1601481600000 + $cc sql select percentile(tbcol, 1) as c from $tb where ts > $ms print ===> $data00 -if $data00 != 5.140000000 then +if $data00 != 5.140000000 then return -1 endi @@ -107,7 +107,7 @@ $ms = 1601481600000 + $cc sql select percentile(tbcol, 5) as c from $tb where ts > $ms print ===> $data00 -if $data00 != 5.700000000 then +if $data00 != 5.700000000 then return -1 endi @@ -116,15 +116,15 @@ $ms = 1601481600000 + $cc sql select percentile(tbcol, 0) as c from $tb where ts > $ms print ===> $data00 -if $data00 != 5.000000000 then +if $data00 != 5.000000000 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compute/stddev.sim b/tests/script/tsim/compute/stddev.sim index 9e7a52a774c418c84d1c3ab7b4d7b8c19efba223..db3af3e207129e4f103410298422cbcd6419c8ea 100644 --- a/tests/script/tsim/compute/stddev.sim +++ b/tests/script/tsim/compute/stddev.sim @@ -25,18 +25,18 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -44,7 +44,7 @@ $tb = $tbPrefix . $i sql select stddev(tbcol) from $tb print ===> $data00 -if $data00 != 5.766281297 then +if $data00 != 5.766281297 then return -1 endi @@ -54,27 +54,27 @@ $ms = 1601481600000 + $cc sql select stddev(tbcol) from $tb where ts <= $ms print ===> $data00 -if $data00 != 1.414213562 then +if $data00 != 1.414213562 then return -1 endi print =============== step4 sql select stddev(tbcol) as b from $tb print ===> $data00 -if $data00 != 5.766281297 then +if $data00 != 5.766281297 then return -1 endi print =============== step5 sql select stddev(tbcol) as b from $tb interval(1m) -print ===> $data00 -if $data00 != 0.000000000 then +print ===> $data00 +if $data00 != 0.000000000 then return -1 endi sql select stddev(tbcol) as b from $tb interval(1d) print ===> $data00 -if $data00 != 5.766281297 then +if $data00 != 5.766281297 then return -1 endi @@ -84,18 +84,18 @@ $ms = 1601481600000 + $cc sql select stddev(tbcol) as b from $tb where ts <= $ms interval(1m) print ===> $data00 -if $data00 != 0.000000000 then +if $data00 != 0.000000000 then return -1 endi -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compute/sum.sim b/tests/script/tsim/compute/sum.sim index 717389e061eefe53be667e4bffec53222963d716..7bb000817e510c74afd89ed564365a43b7d8b505 100644 --- a/tests/script/tsim/compute/sum.sim +++ b/tests/script/tsim/compute/sum.sim @@ -25,18 +25,18 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -44,7 +44,7 @@ $tb = $tbPrefix . $i sql select sum(tbcol) from $tb print ===> $data00 -if $data00 != 190 then +if $data00 != 190 then return -1 endi @@ -54,27 +54,27 @@ $ms = 1601481600000 + $cc sql select sum(tbcol) from $tb where ts <= $ms print ===> $data00 -if $data00 != 10 then +if $data00 != 10 then return -1 endi print =============== step4 sql select sum(tbcol) as b from $tb print ===> $data00 -if $data00 != 190 then +if $data00 != 190 then return -1 endi print =============== step5 sql select sum(tbcol) as b from $tb interval(1m) print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi sql select sum(tbcol) as b from $tb interval(1d) print ===> $data00 -if $data00 != 190 then +if $data00 != 190 then return -1 endi @@ -84,17 +84,17 @@ $ms = 1601481600000 + $cc sql select sum(tbcol) as b from $tb where ts <= $ms interval(1m) print ===> $data10 -if $data10 != 1 then +if $data10 != 1 then return -1 endi -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step7 sql select sum(tbcol) from $mt print ===> $data00 -if $data00 != 1900 then +if $data00 != 1900 then return -1 endi @@ -104,13 +104,13 @@ $ms = 1601481600000 + $cc sql select sum(tbcol) as c from $mt where ts <= $ms print ===> $data00 -if $data00 != 100 then +if $data00 != 100 then return -1 endi sql select sum(tbcol) as c from $mt where tgcol < 5 print ===> $data00 -if $data00 != 950 then +if $data00 != 950 then return -1 endi @@ -119,31 +119,31 @@ $ms = 1601481600000 + $cc sql select sum(tbcol) as c from $mt where tgcol < 5 and ts <= $ms print ===> $data00 -if $data00 != 50 then +if $data00 != 50 then return -1 endi print =============== step9 sql select sum(tbcol) as b from $mt interval(1m) print ===> $data10 -if $data10 < 5 then +if $data10 < 5 then return -1 endi sql select sum(tbcol) as b from $mt interval(1d) print ===> $data00 -if $data00 != 1900 then +if $data00 != 1900 then return -1 endi print =============== step10 sql select sum(tbcol) as b from $mt group by tgcol print ===> $data00 -if $data00 != 190 then +if $data00 != 190 then return -1 endi -if $rows != $tbNum then +if $rows != $tbNum then return -1 endi @@ -153,19 +153,19 @@ $ms = 1601481600000 + $cc sql select sum(tbcol) as b from $mt where ts <= $ms partition by tgcol interval(1d) print select sum(tbcol) as b from $mt where ts <= $ms partition by tgcol interval(1d) -print ===> $data00 $rows -if $data00 != 10 then +print ===> $data00 $rows +if $data00 != 10 then return -1 endi -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/compute/top.sim b/tests/script/tsim/compute/top.sim index 75445762d063e749069a64d5471d9cc6d1870904..8caf6038962f0c6672b695d0cf33406a2a0e2a15 100644 --- a/tests/script/tsim/compute/top.sim +++ b/tests/script/tsim/compute/top.sim @@ -25,18 +25,18 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw print =============== step2 $i = 1 @@ -44,7 +44,7 @@ $tb = $tbPrefix . $i sql select top(tbcol, 1) from $tb print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi @@ -54,24 +54,24 @@ $ms = 1601481600000 + $cc sql select top(tbcol, 1) from $tb where ts <= $ms print ===> $data00 -if $data00 != 4 then +if $data00 != 4 then return -1 endi print =============== step4 sql select top(tbcol, 1) as b from $tb print ===> $data00 -if $data00 != 19 then +if $data00 != 19 then return -1 endi print =============== step5 -sql select top(tbcol, 2) as b from $tb +sql select top(tbcol, 2) as b from $tb print ===> $data00 $data10 -if $data00 != 18 then +if $data00 != 18 then return -1 endi -if $data10 != 19 then +if $data10 != 19 then return -1 endi @@ -81,10 +81,10 @@ $ms = 1601481600000 + $cc sql select top(tbcol, 2) as b from $tb where ts <= $ms print ===> $data00 $data10 -if $data00 != 3 then +if $data00 != 3 then return -1 endi -if $data10 != 4 then +if $data10 != 4 then return -1 endi @@ -95,8 +95,8 @@ step6: print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/db/alter_option.sim b/tests/script/tsim/db/alter_option.sim index d81671eebdd6e64305988fb4ec058b6dd12fc147..3d260235f26d1bccd9094d118deebeceb35e82e5 100644 --- a/tests/script/tsim/db/alter_option.sim +++ b/tests/script/tsim/db/alter_option.sim @@ -186,13 +186,13 @@ sql_error alter database db replica 0 #sql alter database db replica 1 #sql select * from information_schema.ins_databases #print replica: $data4_db -#if $data4_db != 1 then +#if $data4_db != 1 then # return -1 #endi #sql alter database db replica 3 #sql select * from information_schema.ins_databases #print replica: $data4_db -#if $data4_db != 3 then +#if $data4_db != 3 then # return -1 #endi @@ -200,13 +200,13 @@ sql_error alter database db replica 0 #sql alter database db quorum 2 #sql select * from information_schema.ins_databases #print quorum $data5_db -#if $data5_db != 2 then +#if $data5_db != 2 then # return -1 #endi #sql alter database db quorum 1 #sql select * from information_schema.ins_databases #print quorum $data5_db -#if $data5_db != 1 then +#if $data5_db != 1 then # return -1 #endi @@ -233,7 +233,7 @@ endi #sql alter database db keep 1000,2000 #sql select * from information_schema.ins_databases #print keep $data7_db -#if $data7_db != 500,500,500 then +#if $data7_db != 500,500,500 then # return -1 #endi @@ -263,13 +263,13 @@ sql_error alter database db keep -1 #sql alter database db blocks 3 #sql select * from information_schema.ins_databases #print blocks $data9_db -#if $data9_db != 3 then +#if $data9_db != 3 then # return -1 #endi #sql alter database db blocks 11 #sql select * from information_schema.ins_databases #print blocks $data9_db -#if $data9_db != 11 then +#if $data9_db != 11 then # return -1 #endi @@ -300,13 +300,13 @@ print ============== step wal_level sql alter database db wal_level 1 sql select * from information_schema.ins_databases print wal_level $data20_db -if $data20_db != 1 then +if $data20_db != 1 then return -1 endi sql alter database db wal_level 2 sql select * from information_schema.ins_databases print wal_level $data20_db -if $data20_db != 2 then +if $data20_db != 2 then return -1 endi @@ -319,19 +319,19 @@ print ============== modify wal_fsync_period sql alter database db wal_fsync_period 2000 sql select * from information_schema.ins_databases print wal_fsync_period $data21_db -if $data21_db != 2000 then +if $data21_db != 2000 then return -1 endi sql alter database db wal_fsync_period 500 sql select * from information_schema.ins_databases print wal_fsync_period $data21_db -if $data21_db != 500 then +if $data21_db != 500 then return -1 endi sql alter database db wal_fsync_period 0 sql select * from information_schema.ins_databases print wal_fsync_period $data21_db -if $data21_db != 0 then +if $data21_db != 0 then return -1 endi sql_error alter database db wal_fsync_period 180001 @@ -351,31 +351,31 @@ print ============== modify cachelast [0, 1, 2, 3] sql alter database db cachemodel 'last_value' sql select * from information_schema.ins_databases print cachelast $data18_db -if $data18_db != last_value then +if $data18_db != last_value then return -1 endi sql alter database db cachemodel 'last_row' sql select * from information_schema.ins_databases print cachelast $data18_db -if $data18_db != last_row then +if $data18_db != last_row then return -1 endi sql alter database db cachemodel 'none' sql select * from information_schema.ins_databases print cachelast $data18_db -if $data18_db != none then +if $data18_db != none then return -1 endi sql alter database db cachemodel 'last_value' sql select * from information_schema.ins_databases print cachelast $data18_db -if $data18_db != last_value then +if $data18_db != last_value then return -1 endi sql alter database db cachemodel 'both' sql select * from information_schema.ins_databases print cachelast $data18_db -if $data18_db != both then +if $data18_db != both then return -1 endi diff --git a/tests/script/tsim/db/alter_replica_13.sim b/tests/script/tsim/db/alter_replica_13.sim index d232c9bcd3eee5422a4ae74ac3b401d0ba3420a6..1d06d3abb96b101d6fdaede7a321515a9999d8c5 100644 --- a/tests/script/tsim/db/alter_replica_13.sim +++ b/tests/script/tsim/db/alter_replica_13.sim @@ -36,10 +36,10 @@ endi print =============== step2: create database sql create database db vgroups 1 sql select * from information_schema.ins_databases -if $rows != 3 then +if $rows != 3 then return -1 endi -if $data(db)[4] != 1 then +if $data(db)[4] != 1 then return -1 endi @@ -82,7 +82,7 @@ step3: return -1 endi sql select * from information_schema.ins_dnodes -print ===> rows: $rows +print ===> rows: $rows print ===> $data00 $data01 $data02 $data03 $data04 $data05 print ===> $data10 $data11 $data12 $data13 $data14 $data15 print ===> $data20 $data21 $data22 $data23 $data24 $data25 @@ -115,7 +115,7 @@ step4: return -1 endi sql show db.vgroups -print ===> rows: $rows +print ===> rows: $rows print ===> $data00 $data01 $data02 $data03 $data04 $data05 if $data[0][4] != leader then goto step4 @@ -137,4 +137,4 @@ endi system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode4 -s stop -x SIGINT diff --git a/tests/script/tsim/db/alter_replica_31.sim b/tests/script/tsim/db/alter_replica_31.sim index 17ab04052043be36f22e10d56ae06ddf717b758a..4ab6783d0702197fe32f34830a67c3cc64a7b4a7 100644 --- a/tests/script/tsim/db/alter_replica_31.sim +++ b/tests/script/tsim/db/alter_replica_31.sim @@ -23,7 +23,7 @@ step1: return -1 endi sql select * from information_schema.ins_dnodes -print ===> rows: $rows +print ===> rows: $rows print ===> $data00 $data01 $data02 $data03 $data04 $data05 print ===> $data10 $data11 $data12 $data13 $data14 $data15 print ===> $data20 $data21 $data22 $data23 $data24 $data25 @@ -47,10 +47,10 @@ endi print =============== step2: create database sql create database db vgroups 1 replica 3 sql select * from information_schema.ins_databases -if $rows != 3 then +if $rows != 3 then return -1 endi -if $data(db)[4] != 3 then +if $data(db)[4] != 3 then return -1 endi @@ -139,7 +139,7 @@ step3: return -1 endi sql show db.vgroups -print ===> rows: $rows +print ===> rows: $rows if $rows != 1 then goto step3 endi @@ -165,4 +165,4 @@ endi system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode4 -s stop -x SIGINT diff --git a/tests/script/tsim/db/back_insert.sim b/tests/script/tsim/db/back_insert.sim index e2bdb3a64bb06f56f4fdaf1fba58913ac0b94808..b3f820729386df8ec4bfea69feaabf33f137b3ef 100644 --- a/tests/script/tsim/db/back_insert.sim +++ b/tests/script/tsim/db/back_insert.sim @@ -2,8 +2,8 @@ sql connect $x = 1 begin: sql reset query cache - sleep 100 - sql insert into db.tb values(now, $x ) -x begin - #print ===> insert successed $x - $x = $x + 1 -goto begin \ No newline at end of file + sleep 100 + sql insert into db.tb values(now, $x ) -x begin + #print ===> insert successed $x + $x = $x + 1 +goto begin diff --git a/tests/script/tsim/db/basic1.sim b/tests/script/tsim/db/basic1.sim index 679440590fe99648ee2907adbc027c3daa7aa526..69eeb9347b22b154c7609b2ff89e36aa43f63a82 100644 --- a/tests/script/tsim/db/basic1.sim +++ b/tests/script/tsim/db/basic1.sim @@ -25,15 +25,15 @@ endi print =============== show vgroups1 sql use d1 sql show vgroups -if $rows != 2 then +if $rows != 2 then return -1 endi -if $data00 != 2 then +if $data00 != 2 then return -1 endi -if $data10 != 3 then +if $data10 != 3 then return -1 endi @@ -59,11 +59,11 @@ if $rows != 2 then return -1 endi -if $data00 != 4 then +if $data00 != 4 then return -1 endi -if $data10 != 5 then +if $data10 != 5 then return -1 endi @@ -73,15 +73,15 @@ if $rows != 3 then return -1 endi -if $data00 != 6 then +if $data00 != 6 then return -1 endi -if $data10 != 7 then +if $data10 != 7 then return -1 endi -if $data20 != 8 then +if $data20 != 8 then return -1 endi @@ -91,19 +91,19 @@ if $rows != 4 then return -1 endi -if $data00 != 9 then +if $data00 != 9 then return -1 endi -if $data10 != 10 then +if $data10 != 10 then return -1 endi -if $data20 != 11 then +if $data20 != 11 then return -1 endi -if $data30 != 12 then +if $data30 != 12 then return -1 endi diff --git a/tests/script/tsim/db/basic2.sim b/tests/script/tsim/db/basic2.sim index 114adf98e69f56b2a520446958bbd75abcc15a6e..4f0ba4a13c18f29a758a92318c2a66c133fd28f3 100644 --- a/tests/script/tsim/db/basic2.sim +++ b/tests/script/tsim/db/basic2.sim @@ -3,6 +3,24 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/exec.sh -n dnode1 -s start sql connect +print =============== conflict stb +sql create database db vgroups 4; +sql use db; +sql create table stb (ts timestamp, i int) tags (j int); +sql_error create table stb using stb tags (1); +sql_error create table stb (ts timestamp, i int); + +sql create table ctb (ts timestamp, i int); +sql_error create table ctb (ts timestamp, i int) tags (j int); + +sql create table ntb (ts timestamp, i int); +sql_error create table ntb (ts timestamp, i int) tags (j int); + +sql drop table ntb +sql create table ntb (ts timestamp, i int) tags (j int); + +sql drop database db + print =============== create database d1 sql create database d1 sql use d1 @@ -12,7 +30,7 @@ sql create table t3 (ts timestamp, i int); sql create table t4 (ts timestamp, i int); sql select * from information_schema.ins_databases -print rows: $rows +print rows: $rows print $data00 $data01 $data02 $data03 print $data10 $data11 $data12 $data13 if $rows != 3 then @@ -32,7 +50,7 @@ endi #endi sql show tables -if $rows != 4 then +if $rows != 4 then return -1 endi @@ -49,8 +67,8 @@ if $rows != 4 then endi sql show tables -if $rows != 3 then +if $rows != 3 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/db/basic3.sim b/tests/script/tsim/db/basic3.sim index 30faec0494fa05661a7434c14c894efe8d0f4915..db355213db84fcb8e22ced9dbef37486ef243982 100644 --- a/tests/script/tsim/db/basic3.sim +++ b/tests/script/tsim/db/basic3.sim @@ -23,12 +23,12 @@ if $data22 != 2 then return -1 endi -#if $data03 != 4 then +#if $data03 != 4 then # return -1 #endi sql show d1.tables -if $rows != 4 then +if $rows != 4 then return -1 endi @@ -44,8 +44,8 @@ if $rows != 4 then endi sql show d2.tables -if $rows != 3 then +if $rows != 3 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/db/basic4.sim b/tests/script/tsim/db/basic4.sim index f407c6352d38d858f8e918142ca820a76e668878..7a5e0ec76478d0a401a803b95b344172dfcef1c4 100644 --- a/tests/script/tsim/db/basic4.sim +++ b/tests/script/tsim/db/basic4.sim @@ -11,109 +11,109 @@ sql create table d1.t3 (ts timestamp, i int); sql create table d1.t4 (ts timestamp, i int); sql select * from information_schema.ins_databases -if $rows != 3 then +if $rows != 3 then return -1 endi -if $data20 != d1 then +if $data20 != d1 then return -1 endi -if $data22 != 1 then +if $data22 != 1 then return -1 endi -if $data24 != 1 then +if $data24 != 1 then return -1 endi sql show d1.tables -if $rows != 4 then +if $rows != 4 then return -1 endi sql show d1.vgroups -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 2 then +if $data00 != 2 then return -1 endi -if $data01 != d1 then +if $data01 != d1 then return -1 endi -print =============== drop table +print =============== drop table sql drop table d1.t1 sql select * from information_schema.ins_databases -if $rows != 3 then +if $rows != 3 then return -1 endi -if $data20 != d1 then +if $data20 != d1 then return -1 endi -if $data22 != 1 then +if $data22 != 1 then return -1 endi -if $data24 != 1 then +if $data24 != 1 then return -1 endi sql show d1.tables -if $rows != 3 then +if $rows != 3 then return -1 endi sql show d1.vgroups -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 2 then +if $data00 != 2 then return -1 endi -if $data01 != d1 then +if $data01 != d1 then return -1 endi -print =============== drop all table +print =============== drop all table sql drop table d1.t2 sql drop table d1.t3 sql drop table d1.t4 sql select * from information_schema.ins_databases -if $rows != 3 then +if $rows != 3 then return -1 endi -if $data20 != d1 then +if $data20 != d1 then return -1 endi -if $data22 != 1 then +if $data22 != 1 then return -1 endi -if $data24 != 1 then +if $data24 != 1 then return -1 endi sql show d1.tables -if $rows != 0 then +if $rows != 0 then return -1 endi sql show d1.vgroups -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 2 then +if $data00 != 2 then return -1 endi -if $data01 != d1 then +if $data01 != d1 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/db/basic5.sim b/tests/script/tsim/db/basic5.sim index 9b809c35f09c28dc81cefedb24e3bd507d792f34..933fb8cf4b49a1d737b58864c6f9cd63f00b9a1e 100644 --- a/tests/script/tsim/db/basic5.sim +++ b/tests/script/tsim/db/basic5.sim @@ -13,13 +13,13 @@ sql create table tb1 using st1 tags(1); sql insert into tb1 values (now, 1); sql show stables -if $rows != 1 then +if $rows != 1 then print $rows return -1 endi sql show tables -if $rows != 1 then +if $rows != 1 then return -1 endi @@ -35,12 +35,12 @@ sql use db1; sql create stable st1 (ts timestamp, f1 int) tags(t1 int) sql show stables -if $rows != 1 then +if $rows != 1 then return -1 endi sql show tables -if $rows != 0 then +if $rows != 0 then return -1 endi diff --git a/tests/script/tsim/db/basic6.sim b/tests/script/tsim/db/basic6.sim index 2377a65ac0983f5a60d4bc5c881d2377d2c92a43..50435747874475c58b1c493abf935f15ca1f61d9 100644 --- a/tests/script/tsim/db/basic6.sim +++ b/tests/script/tsim/db/basic6.sim @@ -14,7 +14,7 @@ $st = $stPrefix . $i $tb = $tbPrefix . $i print =============== step1 -# quorum presicion +# quorum presicion sql create database $db vgroups 8 replica 1 duration 2 keep 10 minrows 80 maxrows 10000 wal_level 2 wal_fsync_period 1000 comp 0 cachemodel 'last_value' precision 'us' sql select * from information_schema.ins_databases print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09 @@ -46,7 +46,7 @@ endi #if $data29 != 12 then # return -1 #endi - + print =============== step2 sql_error create database $db sql create database if not exists $db @@ -60,7 +60,7 @@ sql drop database $db sql select * from information_schema.ins_databases if $rows != 2 then return -1 -endi +endi print =============== step4 sql_error drop database $db @@ -102,22 +102,22 @@ while $i < 5 sql create table $tb using $st tags(1) sql show stables - if $rows != 1 then + if $rows != 1 then return -1 endi print $data00 $data01 $data02 $data03 - if $data00 != $st then + if $data00 != $st then return -1 endi sql show tables - if $rows != 1 then + if $rows != 1 then return -1 endi print $data00 $data01 $data02 $data03 - if $data00 != $tb then + if $data00 != $tb then return -1 endi @@ -127,8 +127,8 @@ endw print =============== step7 $i = 0 while $i < 5 - $db = $dbPrefix . $i - sql drop database $db + $db = $dbPrefix . $i + sql drop database $db $i = $i + 1 endw @@ -143,20 +143,20 @@ sql create table $st (ts timestamp, i int) tags (j int) sql create table $tb using $st tags(1) sql show stables -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != $st then +if $data00 != $st then return -1 endi sql show tables -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != $tb then +if $data00 != $tb then return -1 endi @@ -168,12 +168,12 @@ sql create database $db sql use $db sql show stables -if $rows != 0 then +if $rows != 0 then return -1 endi sql show tables -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -182,20 +182,20 @@ sql create table $st (ts timestamp, i int) tags (j int) sql create table $tb using $st tags(1) sql show stables -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != $st then +if $data00 != $st then return -1 endi sql show tables -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != $tb then +if $data00 != $tb then return -1 endi @@ -207,12 +207,12 @@ sql create database $db sql use $db sql show stables -if $rows != 0 then +if $rows != 0 then return -1 endi sql show tables -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -221,20 +221,20 @@ sql create table $st (ts timestamp, i int) tags (j int) sql create table $tb using $st tags(1) sql show stables -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != $st then +if $data00 != $st then return -1 endi sql show tables -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != $tb then +if $data00 != $tb then return -1 endi @@ -245,12 +245,12 @@ sql insert into $tb values (now+4a, 3) sql insert into $tb values (now+5a, 4) sql select * from $tb -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $st -if $rows != 5 then +if $rows != 5 then return -1 endi @@ -262,12 +262,12 @@ sql create database $db sql use $db sql show stables -if $rows != 0 then +if $rows != 0 then return -1 endi sql show tables -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -276,20 +276,20 @@ sql create table $st (ts timestamp, i int) tags (j int) sql create table $tb using $st tags(1) sql show stables -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != $st then +if $data00 != $st then return -1 endi sql show tables -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != $tb then +if $data00 != $tb then return -1 endi @@ -300,12 +300,12 @@ sql insert into $tb values (now+4a, 3) sql insert into $tb values (now+5a, 4) sql select * from $tb -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $st -if $rows != 5 then +if $rows != 5 then return -1 endi diff --git a/tests/script/tsim/db/commit.sim b/tests/script/tsim/db/commit.sim index 731f2aa256f219f37aed66c8a08098f9b322706c..223324503411000433f33b890e095d1534027572 100644 --- a/tests/script/tsim/db/commit.sim +++ b/tests/script/tsim/db/commit.sim @@ -39,9 +39,9 @@ sql create table tb (ts timestamp, i int) $x = 1 while $x < 41 $time = $x . m - sql insert into tb values (now + $time , $x ) + sql insert into tb values (now + $time , $x ) $x = $x + 1 -endw +endw sql select * from tb order by ts desc print ===> rows $rows @@ -71,7 +71,7 @@ if $data01 != 40 then return -1 endi -$oldnum = $rows +$oldnum = $rows $num = $rows + 2 print ======== step3 import old data @@ -120,4 +120,4 @@ if $data01 != 40 then endi system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode2 -s stop -x SIGINT diff --git a/tests/script/tsim/db/delete_reuse1.sim b/tests/script/tsim/db/delete_reuse1.sim index 680fe6b2eddac8d61f4a8f1da8c03f7fc554e408..9dcb3c6ac1a3198698b1ba88192707262aa6a7ae 100644 --- a/tests/script/tsim/db/delete_reuse1.sim +++ b/tests/script/tsim/db/delete_reuse1.sim @@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/exec.sh -n dnode1 -s start sql connect -print ======== step1 +print ======== step1 sql create database d1 replica 1 vgroups 1 sql create database d2 replica 1 vgroups 1 sql create database d3 replica 1 vgroups 1 @@ -47,7 +47,7 @@ step2: print ========= step3 sql reset query cache -sleep 50 +sleep 50 sql create database d1 replica 1 sql create table d1.t1 (ts timestamp, i int) @@ -65,20 +65,20 @@ while $x < 20 sql insert into d1.t1 values(now, -1) -x step4 return -1 step4: - + sql create database d1 replica 1 sql reset query cache - sleep 50 + sleep 50 sql create table d1.t1 (ts timestamp, i int) sql insert into d1.t1 values(now, $x ) sql select * from d1.t1 if $rows != 1 then return -1 endi - + $x = $x + 1 - + print ===> loop times: $x -endw +endw -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/db/delete_reuse2.sim b/tests/script/tsim/db/delete_reuse2.sim index d181b6b7806a30a70e2ba8ea05d2ab6022f037c8..4480b60b1b7a3eecfa605cfc4e287b4928bd971a 100644 --- a/tests/script/tsim/db/delete_reuse2.sim +++ b/tests/script/tsim/db/delete_reuse2.sim @@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/exec.sh -n dnode1 -s start sql connect -print ======== step1 +print ======== step1 sql create database d1 replica 1 sql create database d2 replica 1 sql create database d3 replica 1 @@ -48,7 +48,7 @@ step2: print ========= step3 sql create database db1 replica 1 sql reset query cache -sleep 50 +sleep 50 sql create table db1.tb1 (ts timestamp, i int) sql insert into db1.tb1 values(now, 2) @@ -61,7 +61,7 @@ print ========= step4 $x = 1 while $x < 20 - $db = db . $x + $db = db . $x $tb = tb . $x sql use $db sql drop database $db @@ -69,14 +69,14 @@ while $x < 20 sql insert into $tb values(now, -1) -x step4 return -1 step4: - - $x = $x + 1 - $db = db . $x + + $x = $x + 1 + $db = db . $x $tb = tb . $x - + sql reset query cache - sleep 50 - + sleep 50 + sql create database $db replica 1 sql use $db sql create table $tb (ts timestamp, i int) @@ -85,8 +85,8 @@ while $x < 20 if $rows != 1 then return -1 endi - + print ===> loop times: $x -endw +endw -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/db/delete_reusevnode.sim b/tests/script/tsim/db/delete_reusevnode.sim index d194f82d084b6855841970f8b7ffb822557a6100..7af5c9d39d2c76cf6f2dcf245e219c7a81577405 100644 --- a/tests/script/tsim/db/delete_reusevnode.sim +++ b/tests/script/tsim/db/delete_reusevnode.sim @@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/exec.sh -n dnode1 -s start sql connect -print ======== step1 +print ======== step1 $tbPrefix = t $i = 0 @@ -21,13 +21,13 @@ while $i < 30 print times $i $i = $i + 1 -endw +endw print ======== step2 sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 -endi +endi system sh/stop_dnodes.sh @@ -94,4 +94,4 @@ if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/db/delete_reusevnode2.sim b/tests/script/tsim/db/delete_reusevnode2.sim index 754a6d695b6db790c85657129a751d873a796301..91473e5ee16d61f80eedf2bca85031426ebab92a 100644 --- a/tests/script/tsim/db/delete_reusevnode2.sim +++ b/tests/script/tsim/db/delete_reusevnode2.sim @@ -62,4 +62,4 @@ if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/db/delete_writing1.sim b/tests/script/tsim/db/delete_writing1.sim index 279f8dece872b7c7aea0c1f46dee600894dd22a3..6fec09989d83160a64b09e9781aa1422274baa61 100644 --- a/tests/script/tsim/db/delete_writing1.sim +++ b/tests/script/tsim/db/delete_writing1.sim @@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/exec.sh -n dnode1 -s start sql connect -sql create database db +sql create database db sql create table db.tb (ts timestamp, i int) sql insert into db.tb values(now, 1) @@ -11,18 +11,18 @@ print ======== start back run_back tsim/db/back_insert.sim sleep 1000 -print ======== step1 -$x = 1 +print ======== step1 +$x = 1 while $x < 10 print drop database times $x sql drop database if exists db - sql create database db + sql create database db sql create table db.tb (ts timestamp, i int) sleep 1000 - + $x = $x + 1 endw -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/db/delete_writing2.sim b/tests/script/tsim/db/delete_writing2.sim index 8eab126ae89472efac9e6f90c5b5e8a8ea033436..ad156f30eb0be57bff2ba3e696e02c8ce2c378bb 100644 --- a/tests/script/tsim/db/delete_writing2.sim +++ b/tests/script/tsim/db/delete_writing2.sim @@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/exec.sh -n dnode1 -s start sql connect -sql create database db +sql create database db sql create table db.tb (ts timestamp, i int) sql insert into db.tb values(now, 1) @@ -11,11 +11,11 @@ sql create database db2 sql create table db2.tb2 (ts timestamp, i int) sql insert into db2.tb2 values(now, 1) -sql create database db3 +sql create database db3 sql create table db3.tb3 (ts timestamp, i int) sql insert into db3.tb3 values(now, 1) -sql create database db4 +sql create database db4 sql create table db4.tb4 (ts timestamp, i int) sql insert into db4.tb4 values(now, 1) @@ -23,19 +23,19 @@ print ======== start back run_back tsim/db/back_insert.sim sleep 1000 -print ======== step1 -$x = 1 +print ======== step1 +$x = 1 while $x < 10 print drop database times $x sql drop database if exists db - sql create database db + sql create database db sql create table db.tb (ts timestamp, i int) sleep 1000 - + $x = $x + 1 endw -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/db/dropdnodes.sim b/tests/script/tsim/db/dropdnodes.sim index 8a46d5f9ce405cb84957900c37986cbede044464..20b4a136df102d4ec419b4a109192feb3768beef 100644 --- a/tests/script/tsim/db/dropdnodes.sim +++ b/tests/script/tsim/db/dropdnodes.sim @@ -12,7 +12,7 @@ system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode2 -s start sleep 2000 -sql connect +sql connect sql create dnode $hostname2 sleep 2000 @@ -61,13 +61,13 @@ sql show tables print $rows if $rows != 16 then return -1 -endi +endi sql select * from mt print $rows if $rows != 16 then return -1 -endi +endi print ========== step3 @@ -82,26 +82,26 @@ sql show tables print $rows if $rows != 8 then return -1 -endi +endi sql select * from mt print $rows if $rows != 8 then return -1 -endi +endi sql select * from db.t5 if $rows != 1 then return -1 -endi +endi sql select * from db.t13 if $rows != 1 then return -1 -endi +endi sql_error select * from db.t1 sql_error select * from db.t9 system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode2 -s stop -x SIGINT diff --git a/tests/script/tsim/db/keep.sim b/tests/script/tsim/db/keep.sim index e146a666d083af5c0431ad1cd639b9321096c5f7..f0653c4801bd219023ce43904b98b9a92cf40616 100644 --- a/tests/script/tsim/db/keep.sim +++ b/tests/script/tsim/db/keep.sim @@ -14,7 +14,7 @@ while $x < 41 sql insert into tb values (now - $time , $x ) -x step2 step2: $x = $x + 1 -endw +endw sql select * from tb print ===> rows $rows last $data01 @@ -42,10 +42,10 @@ sql alter database keepdb keep 60 sql flush database keepdb sql select * from information_schema.ins_databases print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 -if $data22 != 2 then +if $data22 != 2 then return -1 endi -if $data27 != 86400m,86400m,86400m then +if $data27 != 86400m,86400m,86400m then return -1 endi @@ -56,7 +56,7 @@ while $x < 81 sql insert into tb values (now - $time , $x ) -x step4 step4: $x = $x + 1 -endw +endw sql select * from tb print ===> rows $rows last $data01 @@ -83,10 +83,10 @@ endi print ======== step6 alter db sql alter database keepdb keep 30 sql select * from information_schema.ins_databases -if $data22 != 2 then +if $data22 != 2 then return -1 endi -if $data27 != 43200m,43200m,43200m then +if $data27 != 43200m,43200m,43200m then return -1 endi @@ -110,7 +110,7 @@ while $x < 121 sql insert into tb values (now - $time , $x ) -x step8 step8: $x = $x + 1 -endw +endw sql select * from tb print ===> rows $rows last $data01 @@ -137,4 +137,4 @@ error3: print ======= test success system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode2 -s stop -x SIGINT diff --git a/tests/script/tsim/db/len.sim b/tests/script/tsim/db/len.sim index ae475ddf47c8d327d317148fe27b22c52b336eaa..047dafd5f8c13ce8533babd1a0c90d9a93d6982c 100644 --- a/tests/script/tsim/db/len.sim +++ b/tests/script/tsim/db/len.sim @@ -11,33 +11,33 @@ sql create database -x step1 step1: sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi print =============== step2 sql create database a sql select * from information_schema.ins_databases -if $rows != 3 then +if $rows != 3 then return -1 endi sql drop database a sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi print =============== step3 sql create database a12345678 sql select * from information_schema.ins_databases -if $rows != 3 then +if $rows != 3 then return -1 endi sql drop database a12345678 sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi @@ -46,15 +46,15 @@ sql create database a012345678901201234567890120123456789012a0123456789012012345 return -1 step4: sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi print =============== step5 -sql create database a;1 +sql create database a;1 sql drop database a sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi @@ -64,7 +64,7 @@ sql create database a'1 -x step6 step6: sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi @@ -73,7 +73,7 @@ sql create database (a) -x step7 return -1 step7: sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi @@ -82,8 +82,8 @@ sql create database a.1 -x step8 return -1 step8: sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/db/repeat.sim b/tests/script/tsim/db/repeat.sim index 98d66244f519769f7436cbcf52ec8c97b7cba15e..b0627659d082be380f768aa378f4202d86717958 100644 --- a/tests/script/tsim/db/repeat.sim +++ b/tests/script/tsim/db/repeat.sim @@ -56,4 +56,4 @@ sql drop database d10 sql drop database d11 sql drop database d12 -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/db/show_create_db.sim b/tests/script/tsim/db/show_create_db.sim index 45007d01d699a1cb3a04b8e08b174523540e78e3..3a51fedbff18e00cf087146c403aa027f857d3d3 100644 --- a/tests/script/tsim/db/show_create_db.sim +++ b/tests/script/tsim/db/show_create_db.sim @@ -7,7 +7,7 @@ print =============== step2 sql create database db sql show create database db -if $rows != 1 then +if $rows != 1 then return -1 endi @@ -15,13 +15,13 @@ print =============== step3 sql use db sql show create database db -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != db then +if $data00 != db then return -1 -endi +endi sql drop database db diff --git a/tests/script/tsim/db/show_create_table.sim b/tests/script/tsim/db/show_create_table.sim index 44fa09577e0edcc1d9da8d4563a8b6184965882f..0aeee42d21936736b97fd943aca448c6e9ce7046 100644 --- a/tests/script/tsim/db/show_create_table.sim +++ b/tests/script/tsim/db/show_create_table.sim @@ -11,14 +11,14 @@ sql create table t0 using meters tags(1,'ch') sql create table normalTbl(ts timestamp, zone binary(8)) sql use db -sql show create table meters -if $rows != 1 then +sql show create table meters +if $rows != 1 then return -1 endi print ===============check sub table sql show create table t0 -if $rows != 1 then +if $rows != 1 then return -1 endi if $data00 == 't0' then @@ -27,8 +27,8 @@ endi print ===============check normal table -sql show create table normalTbl -if $rows != 1 then +sql show create table normalTbl +if $rows != 1 then return -1 endi @@ -37,8 +37,8 @@ if $data00 == 'normalTbl' then endi print ===============check super table -sql show create table meters -if $rows != 1 then +sql show create table meters +if $rows != 1 then return -1 endi @@ -49,7 +49,7 @@ endi print ===============check sub table with prefix sql show create table db.t0 -if $rows != 1 then +if $rows != 1 then return -1 endi @@ -58,8 +58,8 @@ if $data00 == 't0' then endi print ===============check normal table with prefix -sql show create table db.normalTbl -if $rows != 1 then +sql show create table db.normalTbl +if $rows != 1 then return -1 endi @@ -69,8 +69,8 @@ endi print ===============check super table with prefix -sql show create table db.meters -if $rows != 1 then +sql show create table db.meters +if $rows != 1 then return -1 endi diff --git a/tests/script/tsim/db/tables.sim b/tests/script/tsim/db/tables.sim index cdee504753535c042f77a399911b1f11662f1e3b..273a1fd45d7b18457662dfecd6788e5e33389bec 100644 --- a/tests/script/tsim/db/tables.sim +++ b/tests/script/tsim/db/tables.sim @@ -8,7 +8,7 @@ sql create database db sql select * from information_schema.ins_databases print $rows $data07 -if $rows != 3 then +if $rows != 3 then return -1 endi @@ -125,4 +125,4 @@ if $data01 != 4 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/insert/backquote.sim b/tests/script/tsim/insert/backquote.sim index fc8aa29c4ef40ad0c2d24b91e99a0d6a8590cbe1..f1b1c97e7d7de0b37c633c5db6ecc326970cf89f 100644 --- a/tests/script/tsim/insert/backquote.sim +++ b/tests/script/tsim/insert/backquote.sim @@ -8,9 +8,9 @@ print =============== create database sql create database `database` sql create database `DataBase` sql select * from information_schema.ins_databases -print rows: $rows +print rows: $rows print $data00 $data01 -print $data10 $data11 +print $data10 $data11 print $data20 $data21 if $rows != 4 then return -1 @@ -27,12 +27,12 @@ endi $dbCnt = 0 while $dbCnt < 2 - if $dbCnt == 0 then + if $dbCnt == 0 then sql use `database` - else + else sql use `DataBase` endi - + $dbCnt = $dbCnt + 1 print =============== create super table, include all type @@ -41,13 +41,13 @@ while $dbCnt < 2 sql create table `Stable` (`timestamp` timestamp, `int` int, `Binary` binary(32), `Nchar` nchar(32)) tags (`float` float, `binary` binary(16), `nchar` nchar(16)) sql show stables - print rows: $rows + print rows: $rows print $data00 $data01 print $data10 $data11 - if $rows != 2 then + if $rows != 2 then return -1 endi - if $data00 != Stable then + if $data00 != Stable then if $data00 != stable then return -1 endi @@ -57,24 +57,24 @@ while $dbCnt < 2 return -1 endi endi - + print =============== create child table sql create table `table` using `stable` tags(100.0, 'stable+table', 'stable+table') sql create table `Table` using `stable` tags(100.1, 'stable+Table', 'stable+Table') - + sql create table `TAble` using `Stable` tags(100.0, 'Stable+TAble', 'Stable+TAble') - sql create table `TABle` using `Stable` tags(100.1, 'Stable+TABle', 'Stable+TABle') - + sql create table `TABle` using `Stable` tags(100.1, 'Stable+TABle', 'Stable+TABle') + sql show tables - print rows: $rows + print rows: $rows print $data00 $data01 print $data10 $data11 print $data20 $data21 print $data30 $data31 - if $rows != 4 then + if $rows != 4 then return -1 endi - + print =============== insert data sql insert into `table` values(now+0s, 10, 'table', 'table')(now+1s, 11, 'table', 'table') sql insert into `Table` values(now+0s, 20, 'Table', 'Table')(now+1s, 21, 'Table', 'Table') @@ -86,15 +86,15 @@ while $dbCnt < 2 print rows: $rows print $data00 $data01 $data02 $data03 print $data10 $data11 $data12 $data13 - if $rows != 2 then + if $rows != 2 then return -1 - endi - if $data01 != 10 then + endi + if $data01 != 10 then return -1 - endi - if $data02 != table then + endi + if $data02 != table then return -1 - endi + endi if $data03 != table then print expect table, actual $data03 return -1 @@ -103,57 +103,57 @@ while $dbCnt < 2 print =================> 1 sql select * from `Table` - print rows: $rows + print rows: $rows print $data00 $data01 $data02 $data03 print $data10 $data11 $data12 $data13 - if $rows != 2 then + if $rows != 2 then return -1 - endi - if $data01 != 20 then + endi + if $data01 != 20 then return -1 - endi - if $data02 != Table then + endi + if $data02 != Table then return -1 - endi - if $data03 != Table then + endi + if $data03 != Table then return -1 endi print ================>2 sql select * from `TAble` - print rows: $rows + print rows: $rows print $data00 $data01 $data02 $data03 print $data10 $data11 $data12 $data13 - if $rows != 2 then + if $rows != 2 then return -1 - endi - if $data01 != 30 then + endi + if $data01 != 30 then return -1 - endi - if $data02 != TAble then + endi + if $data02 != TAble then return -1 - endi - if $data03 != TAble then + endi + if $data03 != TAble then return -1 endi - + sql select * from `TABle` - print rows: $rows + print rows: $rows print $data00 $data01 $data02 $data03 print $data10 $data11 $data12 $data13 - if $rows != 2 then + if $rows != 2 then return -1 - endi - if $data01 != 40 then + endi + if $data01 != 40 then return -1 - endi - if $data02 != TABle then + endi + if $data02 != TABle then return -1 - endi - if $data03 != TABle then + endi + if $data03 != TABle then return -1 endi - + #print =============== query data from st, but not support select * from super table, waiting fix #sql select count(*) from `stable` #print rows: $rows @@ -174,7 +174,7 @@ while $dbCnt < 2 # return -1 #endi #sql select * from `stable` - #if $rows != 4 then + #if $rows != 4 then # return -1 #endi @@ -185,9 +185,9 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s start sql select * from information_schema.ins_databases -print rows: $rows +print rows: $rows print $data00 $data01 -print $data10 $data11 +print $data10 $data11 print $data20 $data21 if $rows != 4 then return -1 @@ -204,22 +204,22 @@ endi $dbCnt = 0 while $dbCnt < 2 - if $dbCnt == 0 then + if $dbCnt == 0 then sql use `database` - else + else sql use `DataBase` endi - + $dbCnt = $dbCnt + 1 sql show stables - print rows: $rows + print rows: $rows print $data00 $data01 print $data10 $data11 - if $rows != 2 then + if $rows != 2 then return -1 endi - if $data00 != Stable then + if $data00 != Stable then if $data00 != stable then return -1 endi @@ -229,86 +229,86 @@ while $dbCnt < 2 return -1 endi endi - + sql show tables - print rows: $rows + print rows: $rows print $data00 $data01 print $data10 $data11 print $data20 $data21 print $data30 $data31 - if $rows != 4 then + if $rows != 4 then return -1 endi - + print =============== query data sql select * from `table` - print rows: $rows + print rows: $rows print $data00 $data01 $data02 $data03 print $data10 $data11 $data12 $data13 - if $rows != 2 then + if $rows != 2 then return -1 - endi - if $data01 != 10 then + endi + if $data01 != 10 then return -1 - endi - if $data02 != table then + endi + if $data02 != table then return -1 - endi - if $data03 != table then + endi + if $data03 != table then return -1 endi - + sql select * from `Table` - print rows: $rows + print rows: $rows print $data00 $data01 $data02 $data03 print $data10 $data11 $data12 $data13 - if $rows != 2 then + if $rows != 2 then return -1 - endi - if $data01 != 20 then + endi + if $data01 != 20 then return -1 - endi - if $data02 != Table then + endi + if $data02 != Table then return -1 - endi - if $data03 != Table then + endi + if $data03 != Table then return -1 endi - + sql select * from `TAble` - print rows: $rows + print rows: $rows print $data00 $data01 $data02 $data03 print $data10 $data11 $data12 $data13 - if $rows != 2 then + if $rows != 2 then return -1 - endi - if $data01 != 30 then + endi + if $data01 != 30 then return -1 - endi - if $data02 != TAble then + endi + if $data02 != TAble then return -1 - endi - if $data03 != TAble then + endi + if $data03 != TAble then return -1 endi - + sql select * from `TABle` - print rows: $rows + print rows: $rows print $data00 $data01 $data02 $data03 print $data10 $data11 $data12 $data13 - if $rows != 2 then + if $rows != 2 then return -1 - endi - if $data01 != 40 then + endi + if $data01 != 40 then return -1 - endi - if $data02 != TABle then + endi + if $data02 != TABle then return -1 - endi - if $data03 != TABle then + endi + if $data03 != TABle then return -1 endi - + #print =============== query data from st, but not support select * from super table, waiting fix #sql select count(*) from `stable` #print rows: $rows @@ -329,7 +329,7 @@ while $dbCnt < 2 # return -1 #endi #sql select * from `stable` - #if $rows != 4 then + #if $rows != 4 then # return -1 #endi diff --git a/tests/script/tsim/insert/basic.sim b/tests/script/tsim/insert/basic.sim index 20b39c8f0099605120882214b6e0fb478ae43e23..c4ef3e39dab9f09028ecfd401fb6444a0fe0ecd4 100644 --- a/tests/script/tsim/insert/basic.sim +++ b/tests/script/tsim/insert/basic.sim @@ -20,26 +20,26 @@ $x = 0 while $x < 10 $cc = $x * 60000 $ms = 1601481600000 + $cc - - sql insert into $tb values ($ms , $x ) + + sql insert into $tb values ($ms , $x ) $x = $x + 1 -endw +endw print =============== step 2 $x = 0 while $x < 5 $cc = $x * 60000 $ms = 1551481600000 + $cc - - sql insert into $tb values ($ms , $x ) + + sql insert into $tb values ($ms , $x ) $x = $x + 1 -endw +endw sql select * from $tb print $rows points data are retrieved -if $rows != 15 then +if $rows != 15 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/insert/basic0.sim b/tests/script/tsim/insert/basic0.sim index 5b506de01f85f2dcce21fc890cb73927fda6ac76..be023352f98e7c61e92c01090e686eb74e6dc8e8 100644 --- a/tests/script/tsim/insert/basic0.sim +++ b/tests/script/tsim/insert/basic0.sim @@ -18,7 +18,7 @@ print =============== create super table, include column type for count/sum/min/ sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned) sql show stables -if $rows != 1 then +if $rows != 1 then return -1 endi @@ -28,7 +28,7 @@ sql create table ct2 using stb tags(2000) sql create table ct3 using stb tags(3000) sql show tables -if $rows != 3 then +if $rows != 3 then return -1 endi @@ -46,7 +46,7 @@ sql insert into ct3 values('2021-01-01 00:00:00.000', 10, 2.0, 3.0) #=================================================================== print =============== query data from child table sql select * from ct1 -print rows: $rows +print rows: $rows print $data00 $data01 $data02 $data03 print $data10 $data11 $data12 $data13 print $data20 $data21 $data22 $data23 @@ -58,111 +58,111 @@ if $data01 != 10 then print expect 10, actual: $data01 return -1 endi -if $data02 != 2.00000 then +if $data02 != 2.00000 then return -1 endi -if $data03 != 3.000000000 then +if $data03 != 3.000000000 then return -1 endi print =============== select count(*) from child table sql select count(*) from ct1 -print rows: $rows +print rows: $rows print $data00 $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 4 then +if $data00 != 4 then return -1 endi print =============== select count(column) from child table sql select count(ts), count(c1), count(c2), count(c3) from ct1 -print rows: $rows +print rows: $rows print $data00 $data01 $data02 $data03 -if $data00 != 4 then +if $data00 != 4 then return -1 endi -if $data01 != 4 then +if $data01 != 4 then return -1 endi -if $data02 != 4 then +if $data02 != 4 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi #print =============== select first(*)/first(column) from child table sql select first(*) from ct1 print ====> select first(*) from ct1 -print rows: $rows +print rows: $rows print $data00 $data01 $data02 $data03 sql select first(ts), first(c1), first(c2), first(c3) from ct1 print ====> select first(ts), first(c1), first(c2), first(c3) from ct1 -print rows: $rows +print rows: $rows print $data00 $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 10 then +if $data01 != 10 then return -1 endi -if $data02 != 2.00000 then +if $data02 != 2.00000 then return -1 endi -if $data03 != 3.000000000 then +if $data03 != 3.000000000 then return -1 endi print =============== select min(column) from child table sql select min(c1), min(c2), min(c3) from ct1 -print rows: $rows +print rows: $rows print $data00 $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != -13 then +if $data00 != -13 then return -1 endi if $data01 != -2.30000 then print expect -2.30000, actual: $data01 return -1 endi -if $data02 != -3.300000000 then +if $data02 != -3.300000000 then return -1 endi print =============== select max(column) from child table sql select max(c1), max(c2), max(c3) from ct1 print $data00 $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 11 then +if $data00 != 11 then return -1 endi -if $data01 != 2.10000 then +if $data01 != 2.10000 then return -1 endi -if $data02 != 3.100000000 then +if $data02 != 3.100000000 then return -1 endi print =============== select sum(column) from child table sql select sum(c1), sum(c2), sum(c3) from ct1 print $data00 $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != -4 then +if $data00 != -4 then return -1 endi -if $data01 != -0.400000095 then +if $data01 != -0.400000095 then return -1 endi -if $data02 != -0.400000000 then +if $data02 != -0.400000000 then return -1 endi @@ -173,34 +173,34 @@ print $data00 $data01 $data02 print $data10 $data11 $data12 print $data20 $data21 $data22 print $data30 $data31 $data32 -if $rows != 4 then +if $rows != 4 then return -1 endi -if $data00 != 10 then +if $data00 != 10 then return -1 endi -if $data01 != 2.00000 then +if $data01 != 2.00000 then return -1 endi -if $data02 != 3.000000000 then +if $data02 != 3.000000000 then return -1 endi -if $data10 != 11 then +if $data10 != 11 then return -1 endi -if $data11 != 2.10000 then +if $data11 != 2.10000 then return -1 endi -if $data12 != 3.100000000 then +if $data12 != 3.100000000 then return -1 endi -if $data30 != -13 then +if $data30 != -13 then return -1 endi -if $data31 != -2.30000 then +if $data31 != -2.30000 then return -1 endi -if $data32 != -3.300000000 then +if $data32 != -3.300000000 then return -1 endi #=================================================================== @@ -208,17 +208,17 @@ endi #print =============== query data from stb sql select * from stb print $rows -if $rows != 9 then +if $rows != 9 then return -1 endi #print =============== select count(*) from supter table sql select count(*) from stb print $data00 $data01 $data02 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 9 then +if $data00 != 9 then return -1 endi @@ -234,19 +234,19 @@ print $data50 $data51 $data52 $data53 print $data60 $data61 $data62 $data63 print $data70 $data71 $data72 $data73 print $data80 $data81 $data82 $data83 -if $rows != 9 then +if $rows != 9 then return -1 endi -# The order of data from different sub tables in the super table is random, +# The order of data from different sub tables in the super table is random, # so this detection may fail randomly -if $data01 != 10 then +if $data01 != 10 then return -1 endi -if $data02 != 2.00000 then +if $data02 != 2.00000 then return -1 endi -if $data03 != 3.000000000 then +if $data03 != 3.000000000 then return -1 endi @@ -254,16 +254,16 @@ endi sql select count(ts), count(c1), count(c2), count(c3) from stb print rows: $rows print $data00 $data01 $data02 $data03 -if $data00 != 9 then +if $data00 != 9 then return -1 endi -if $data01 != 9 then +if $data01 != 9 then return -1 endi -if $data02 != 9 then +if $data02 != 9 then return -1 endi -if $data03 != 9 then +if $data03 != 9 then return -1 endi @@ -274,7 +274,7 @@ system sh/exec.sh -n dnode1 -s start print =============== query data from child table sql select * from ct1 -print rows: $rows +print rows: $rows print $data00 $data01 $data02 $data03 print $data10 $data11 $data12 $data13 print $data20 $data21 $data22 $data23 @@ -282,113 +282,113 @@ print $data30 $data31 $data32 $data33 if $rows != 4 then return -1 endi -if $data01 != 10 then +if $data01 != 10 then return -1 endi -if $data02 != 2.00000 then +if $data02 != 2.00000 then return -1 endi -if $data03 != 3.000000000 then +if $data03 != 3.000000000 then return -1 endi print =============== select count(*) from child table sql select count(*) from ct1 -print rows: $rows +print rows: $rows print $data00 $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 4 then +if $data00 != 4 then return -1 endi print =============== select count(column) from child table sql select count(ts), count(c1), count(c2), count(c3) from ct1 -print rows: $rows +print rows: $rows print $data00 $data01 $data02 $data03 -if $data00 != 4 then +if $data00 != 4 then return -1 endi -if $data01 != 4 then +if $data01 != 4 then return -1 endi -if $data02 != 4 then +if $data02 != 4 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi #print =============== select first(*)/first(column) from child table sql select first(*) from ct1 print ====> select first(*) from ct1 -print rows: $rows +print rows: $rows print $data00 $data01 $data02 $data03 sql select first(ts), first(c1), first(c2), first(c3) from ct1 print ====> select first(ts), first(c1), first(c2), first(c3) from ct1 -print rows: $rows +print rows: $rows print $data00 $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 10 then +if $data01 != 10 then return -1 endi -if $data02 != 2.00000 then +if $data02 != 2.00000 then return -1 endi -if $data03 != 3.000000000 then +if $data03 != 3.000000000 then return -1 endi print =============== select min(column) from child table sql select min(c1), min(c2), min(c3) from ct1 -print rows: $rows +print rows: $rows print $data00 $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != -13 then +if $data00 != -13 then return -1 endi -if $data01 != -2.30000 then +if $data01 != -2.30000 then return -1 endi -if $data02 != -3.300000000 then +if $data02 != -3.300000000 then return -1 endi print =============== select max(column) from child table sql select max(c1), max(c2), max(c3) from ct1 print $data00 $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 11 then +if $data00 != 11 then return -1 endi -if $data01 != 2.10000 then +if $data01 != 2.10000 then return -1 endi -if $data02 != 3.100000000 then +if $data02 != 3.100000000 then return -1 endi print =============== select sum(column) from child table sql select sum(c1), sum(c2), sum(c3) from ct1 print $data00 $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != -4 then +if $data00 != -4 then return -1 endi -if $data01 != -0.400000095 then +if $data01 != -0.400000095 then return -1 endi -if $data02 != -0.400000000 then +if $data02 != -0.400000000 then return -1 endi @@ -399,51 +399,51 @@ print $data00 $data01 $data02 print $data10 $data11 $data12 print $data20 $data21 $data22 print $data30 $data31 $data32 -if $rows != 4 then +if $rows != 4 then return -1 endi -if $data00 != 10 then +if $data00 != 10 then return -1 endi -if $data01 != 2.00000 then +if $data01 != 2.00000 then return -1 endi -if $data02 != 3.000000000 then +if $data02 != 3.000000000 then return -1 endi -if $data10 != 11 then +if $data10 != 11 then return -1 endi -if $data11 != 2.10000 then +if $data11 != 2.10000 then return -1 endi -if $data12 != 3.100000000 then +if $data12 != 3.100000000 then return -1 endi -if $data30 != -13 then +if $data30 != -13 then return -1 endi -if $data31 != -2.30000 then +if $data31 != -2.30000 then return -1 endi -if $data32 != -3.300000000 then +if $data32 != -3.300000000 then return -1 endi #=================================================================== print =============== query data from stb sql select * from stb -if $rows != 9 then +if $rows != 9 then return -1 -endi +endi print =============== select count(*) from supter table sql select count(*) from stb print $data00 $data01 $data02 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 9 then +if $data00 != 9 then return -1 endi @@ -459,35 +459,35 @@ print $data50 $data51 $data52 $data53 print $data60 $data61 $data62 $data63 print $data70 $data71 $data72 $data73 print $data80 $data81 $data82 $data83 -if $rows != 9 then +if $rows != 9 then return -1 endi -# The order of data from different sub tables in the super table is random, +# The order of data from different sub tables in the super table is random, # so this detection may fail randomly -if $data01 != 10 then +if $data01 != 10 then return -1 endi -if $data02 != 2.00000 then +if $data02 != 2.00000 then return -1 endi -if $data03 != 3.000000000 then +if $data03 != 3.000000000 then return -1 endi #print =============== select count(column) from supter table sql select count(ts), count(c1), count(c2), count(c3) from stb print $data00 $data01 $data02 $data03 -if $data00 != 9 then +if $data00 != 9 then return -1 endi -if $data01 != 9 then +if $data01 != 9 then return -1 endi -if $data02 != 9 then +if $data02 != 9 then return -1 endi -if $data03 != 9 then +if $data03 != 9 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/insert/basic1.sim b/tests/script/tsim/insert/basic1.sim index 72a883bedf615d57e16ab32589c9c804de599c75..2e3d6fa513147174329297726e0727ab8e4d6c3e 100644 --- a/tests/script/tsim/insert/basic1.sim +++ b/tests/script/tsim/insert/basic1.sim @@ -21,7 +21,7 @@ sql create table stb_2 (ts timestamp, i int) tags (j int) sql create stable stb_3 (ts timestamp, i int) tags (j int) sql show stables -if $rows != 4 then +if $rows != 4 then return -1 endi @@ -30,7 +30,7 @@ sql create table c1 using stb tags(true, -1, -2, -3, -4, -6.0, -7.0, 'child tbl sql create table c2 using stb tags(false, -1, -2, -3, -4, -6.0, -7.0, 'child tbl 2', 'child tbl 2', '2022-02-25 18:00:00.000', 10, 20, 30, 40) sql show tables -if $rows != 2 then +if $rows != 2 then return -1 endi @@ -39,12 +39,12 @@ sql insert into c1 values(now+0s, true, -1, -2, -3, -4, -6.0, -7.0, 'child tbl 1 print =============== query data sql select * from c1 -print rows: $rows +print rows: $rows print $data00 $data01 -print $data10 $data11 +print $data10 $data11 print $data20 $data21 print $data30 $data31 -if $rows != 4 then +if $rows != 4 then return -1 endi @@ -53,17 +53,17 @@ if $data01 != 1 then return -1 endi -if $data02 != -1 then +if $data02 != -1 then return -1 endi -if $data03 != -2 then +if $data03 != -2 then return -1 endi print =============== query data from st, but not support select * from super table, waiting fix sql select * from stb -if $rows != 4 then +if $rows != 4 then return -1 endi @@ -73,12 +73,12 @@ system sh/exec.sh -n dnode1 -s start print =============== query data sql select * from c1 -print rows: $rows +print rows: $rows print $data00 $data01 -print $data10 $data11 +print $data10 $data11 print $data20 $data21 print $data30 $data31 -if $rows != 4 then +if $rows != 4 then return -1 endi @@ -86,17 +86,17 @@ if $data01 != 1 then return -1 endi -if $data02 != -1 then +if $data02 != -1 then return -1 endi -if $data03 != -2 then +if $data03 != -2 then return -1 endi print =============== query data from st, but not support select * from super table, waiting fix sql select * from stb -if $rows != 4 then +if $rows != 4 then return -1 endi diff --git a/tests/script/tsim/insert/basic2.sim b/tests/script/tsim/insert/basic2.sim index eca46697f5bfdca223d922583f98f85838fe25c9..1794bb54f8dad2d250a192c24895ed6c5da28c7c 100644 --- a/tests/script/tsim/insert/basic2.sim +++ b/tests/script/tsim/insert/basic2.sim @@ -11,7 +11,7 @@ print =============== create super table sql create table if not exists stb (ts timestamp, c1 int unsigned, c2 double, c3 binary(10), c4 nchar(10), c5 double) tags (city binary(20),district binary(20)); sql show stables -if $rows != 1 then +if $rows != 1 then return -1 endi @@ -20,7 +20,7 @@ sql create table ct1 using stb tags("BeiJing", "ChaoYang") sql create table ct2 using stb tags("BeiJing", "HaiDian") sql show tables -if $rows != 2 then +if $rows != 2 then return -1 endi @@ -47,7 +47,7 @@ print $data20 $data21 $data22 $data23 $data24 $data25 print $data30 $data31 $data32 $data33 $data34 $data35 print $data40 $data41 $data42 $data43 $data44 $data45 -if $rows != 5 then +if $rows != 5 then print rows $rows != 5 return -1 endi @@ -189,7 +189,7 @@ print $data20 $data21 $data22 $data23 $data24 $data25 print $data30 $data31 $data32 $data33 $data34 $data35 print $data40 $data41 $data42 $data43 $data44 $data45 -if $rows != 5 then +if $rows != 5 then print rows $rows != 5 return -1 endi @@ -319,4 +319,4 @@ if $data45 != 30.000000000 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/insert/commit-merge0.sim b/tests/script/tsim/insert/commit-merge0.sim index dfc22354d2272d824b1d85691dfa57a502f4522e..da66560cbd90b87c5c170c5a15c119489b86a7e7 100644 --- a/tests/script/tsim/insert/commit-merge0.sim +++ b/tests/script/tsim/insert/commit-merge0.sim @@ -63,7 +63,7 @@ reboot_and_check: system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s start -print =============== insert duplicated records to memory - loop $reboot_max - $reboot_cnt +print =============== insert duplicated records to memory - loop $reboot_max - $reboot_cnt sql use db sql insert into ct1 values ('2022-05-01 18:30:27.001', 0.0); sql insert into ct4 values ('2022-04-28 18:30:27.002', 0.0); @@ -91,7 +91,7 @@ sql insert into ct4 values ('2018-05-01 18:30:27.023', NULL) ; sql insert into ct4 values ('2021-03-01 18:30:27.024', NULL) ; sql insert into ct4 values ('2022-08-01 18:30:27.025', NULL) ; -print =============== select * from ct1 - merge memory and file - loop $reboot_max - $reboot_cnt +print =============== select * from ct1 - merge memory and file - loop $reboot_max - $reboot_cnt sql select * from ct1; if $rows != 13 then print rows = $rows != 13 @@ -163,8 +163,8 @@ if $data[12][1] != -99.990000000 then print $data[12][1] != -99.990000000 return -1 endi - -print =============== select * from ct4 - merge memory and file - loop $reboot_max - $reboot_cnt + +print =============== select * from ct4 - merge memory and file - loop $reboot_max - $reboot_cnt sql select * from ct4; if $rows != 12 then print rows = $rows != 12 diff --git a/tests/script/tsim/insert/insert_drop.sim b/tests/script/tsim/insert/insert_drop.sim index 020fd367ae0b1d5146862a8c4a634f29887b8516..467eb3a702198f9c3097e4e66870c344d48fa4d0 100644 --- a/tests/script/tsim/insert/insert_drop.sim +++ b/tests/script/tsim/insert/insert_drop.sim @@ -25,16 +25,16 @@ $ts = $ts0 while $i < 10 $tb = tb . $i sql create table $tb using $stb tags( $i ) - + $x = 0 while $x < $rowNum $xs = $x * $delta $ts = $ts0 + $xs - sql insert into $tb values ( $ts , $x ) + sql insert into $tb values ( $ts , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print ====== tables created print ================== restart server to commit data into disk @@ -46,18 +46,18 @@ sql use $db sql drop table tb5 $i = 0 while $i < 4 - + $tb = tb . $i $x = 0 while $x < $rowNum $xs = $x * $delta $ts = $ts0 + $xs - sql insert into $tb values ( $ts , $x ) + sql insert into $tb values ( $ts , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT @@ -73,4 +73,4 @@ if $rows != 0 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/insert/insert_select.sim b/tests/script/tsim/insert/insert_select.sim index c44197d7d41b34544fe17f0b1ed8ee226eaaca24..e3374ee277ae982d2df476b981ca7fc58f76199d 100644 --- a/tests/script/tsim/insert/insert_select.sim +++ b/tests/script/tsim/insert/insert_select.sim @@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/exec.sh -n dnode1 -s start sql connect -print ======== step1 +print ======== step1 sql drop database if exists db1; sql create database db1 vgroups 3; sql use db1; diff --git a/tests/script/tsim/insert/null.sim b/tests/script/tsim/insert/null.sim index 49adb8ebe04d962ac7bdb6312ec288aed0843808..57aef3f6a54bfb2910f77ffe38018825b6e34e7c 100644 --- a/tests/script/tsim/insert/null.sim +++ b/tests/script/tsim/insert/null.sim @@ -18,7 +18,7 @@ print =============== create super table, include column type for count/sum/min/ sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double, c4 bigint) tags (t1 int unsigned) sql show stables -if $rows != 1 then +if $rows != 1 then return -1 endi @@ -28,7 +28,7 @@ sql create table ct2 using stb tags(2000) sql create table ct3 using stb tags(3000) sql show tables -if $rows != 3 then +if $rows != 3 then return -1 endi @@ -55,22 +55,22 @@ print ===> rows4: $data40 $data41 $data42 $data43 $data44 if $rows != 12 then return -1 endi -if $data01 != 10 then +if $data01 != 10 then return -1 endi -if $data02 != 2.00000 then +if $data02 != 2.00000 then return -1 endi -if $data03 != 3.000000000 then +if $data03 != 3.000000000 then return -1 endi -#if $data41 != -14 then +#if $data41 != -14 then # return -1 #endi -#if $data42 != -2.40000 then +#if $data42 != -2.40000 then # return -1 #endi -#if $data43 != -3.400000000 then +#if $data43 != -3.400000000 then # return -1 #endi @@ -79,10 +79,10 @@ sql select count(*) from ct1 print ===> select count(*) from ct1 print ===> rows: $rows print ===> rows0: $data00 $data01 $data02 $data03 $data04 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 12 then +if $data00 != 12 then return -1 endi @@ -91,16 +91,16 @@ sql select count(ts), count(c1), count(c2), count(c3) from ct1 print ===> select count(ts), count(c1), count(c2), count(c3) from ct1 print ===> rows: $rows print ===> rows0: $data00 $data01 $data02 $data03 $data04 -if $data00 != 12 then +if $data00 != 12 then return -1 endi -if $data01 != 8 then +if $data01 != 8 then return -1 endi -if $data02 != 8 then +if $data02 != 8 then return -1 endi -if $data03 != 8 then +if $data03 != 8 then return -1 endi @@ -113,16 +113,16 @@ sql select min(c1), min(c2), min(c3) from ct1 print ===> select min(c1), min(c2), min(c3) from ct1 print ===> rows: $rows print ===> rows0: $data00 $data01 $data02 $data03 $data04 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != -2147483647 then +if $data00 != -2147483647 then return -1 endi -if $data01 != 2.00000 then +if $data01 != 2.00000 then return -1 endi -if $data02 != 3.000000000 then +if $data02 != 3.000000000 then return -1 endi @@ -131,16 +131,16 @@ sql select max(c1), max(c2), max(c3) from ct1 print ===> select max(c1), max(c2), max(c3) from ct1 print ===> rows: $rows print ===> rows0: $data00 $data01 $data02 $data03 $data04 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 16 then +if $data00 != 16 then return -1 endi -if $data01 != 2.70000 then +if $data01 != 2.70000 then return -1 endi -if $data02 != 3.700000000 then +if $data02 != 3.700000000 then return -1 endi @@ -149,16 +149,16 @@ sql select sum(c1), sum(c2), sum(c3) from ct1 print ===> select sum(c1), sum(c2), sum(c3) from ct1 print ===> rows: $rows print ===> rows0: $data00 $data01 $data02 $data03 $data04 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != -2147483556 then +if $data00 != -2147483556 then return -1 endi -if $data01 != 18.799999952 then +if $data01 != 18.799999952 then return -1 endi -if $data02 != 26.800000000 then +if $data02 != 26.800000000 then return -1 endi @@ -167,43 +167,43 @@ sql select c1, c2, c3 from ct1 print ===> select c1, c2, c3 from ct1 print ===> rows: $rows print ===> rows0: $data00 $data01 $data02 $data03 $data04 -if $rows != 12 then +if $rows != 12 then return -1 endi -if $data00 != 10 then +if $data00 != 10 then return -1 endi -if $data01 != 2.00000 then +if $data01 != 2.00000 then return -1 endi -if $data02 != 3.000000000 then +if $data02 != 3.000000000 then return -1 endi -if $data10 != NULL then +if $data10 != NULL then return -1 endi -if $data11 != NULL then +if $data11 != NULL then return -1 endi -if $data12 != NULL then +if $data12 != NULL then return -1 endi -if $data30 != 11 then +if $data30 != 11 then return -1 endi -if $data31 != NULL then +if $data31 != NULL then return -1 endi -if $data32 != 3.200000000 then +if $data32 != 3.200000000 then return -1 endi -if $data90 != 16 then +if $data90 != 16 then return -1 endi -if $data91 != 2.60000 then +if $data91 != 2.60000 then return -1 endi -if $data92 != 3.600000000 then +if $data92 != 3.600000000 then return -1 endi #=================================================================== @@ -211,36 +211,36 @@ endi #print =============== query data from stb sql select * from stb -print ===> +print ===> print ===> rows: $rows print ===> rows0: $data00 $data01 $data02 $data03 $data04 -if $rows != 12 then +if $rows != 12 then return -1 endi #print =============== select count(*) from supter table sql select count(*) from stb print $data00 $data01 $data02 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 12 then +if $data00 != 12 then return -1 endi #print =============== select count(column) from supter table sql select count(ts), count(c1), count(c2), count(c3) from stb print $data00 $data01 $data02 $data03 -if $data00 != 12 then +if $data00 != 12 then return -1 endi -if $data01 != 8 then +if $data01 != 8 then return -1 endi -if $data02 != 8 then +if $data02 != 8 then return -1 endi -if $data03 != 8 then +if $data03 != 8 then return -1 endi @@ -264,22 +264,22 @@ print ===> rows4: $data40 $data41 $data42 $data43 $data44 if $rows != 12 then return -1 endi -if $data01 != 10 then +if $data01 != 10 then return -1 endi -if $data02 != 2.00000 then +if $data02 != 2.00000 then return -1 endi -if $data03 != 3.000000000 then +if $data03 != 3.000000000 then return -1 endi -if $data41 != 12 then +if $data41 != 12 then return -1 endi -if $data42 != 2.20000 then +if $data42 != 2.20000 then return -1 endi -if $data43 != NULL then +if $data43 != NULL then return -1 endi @@ -288,10 +288,10 @@ sql select count(*) from ct1 print ===> select count(*) from ct1 print ===> rows: $rows print ===> rows0: $data00 $data01 $data02 $data03 $data04 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 12 then +if $data00 != 12 then return -1 endi @@ -300,16 +300,16 @@ sql select count(ts), count(c1), count(c2), count(c3) from ct1 print ===> select count(ts), count(c1), count(c2), count(c3) from ct1 print ===> rows: $rows print ===> rows0: $data00 $data01 $data02 $data03 $data04 -if $data00 != 12 then +if $data00 != 12 then return -1 endi -if $data01 != 8 then +if $data01 != 8 then return -1 endi -if $data02 != 8 then +if $data02 != 8 then return -1 endi -if $data03 != 8 then +if $data03 != 8 then return -1 endi @@ -322,16 +322,16 @@ sql select min(c1), min(c2), min(c3) from ct1 print ===> select min(c1), min(c2), min(c3) from ct1 print ===> rows: $rows print ===> rows0: $data00 $data01 $data02 $data03 $data04 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != -2147483647 then +if $data00 != -2147483647 then return -1 endi -if $data01 != 2.00000 then +if $data01 != 2.00000 then return -1 endi -if $data02 != 3.000000000 then +if $data02 != 3.000000000 then return -1 endi @@ -340,16 +340,16 @@ sql select max(c1), max(c2), max(c3) from ct1 print ===> select max(c1), max(c2), max(c3) from ct1 print ===> rows: $rows print ===> rows0: $data00 $data01 $data02 $data03 $data04 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 16 then +if $data00 != 16 then return -1 endi -if $data01 != 2.70000 then +if $data01 != 2.70000 then return -1 endi -if $data02 != 3.700000000 then +if $data02 != 3.700000000 then return -1 endi @@ -358,16 +358,16 @@ sql select sum(c1), sum(c2), sum(c3) from ct1 print ===> select sum(c1), sum(c2), sum(c3) from ct1 print ===> rows: $rows print ===> rows0: $data00 $data01 $data02 $data03 $data04 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != -2147483556 then +if $data00 != -2147483556 then return -1 endi -if $data01 != 18.799999952 then +if $data01 != 18.799999952 then return -1 endi -if $data02 != 26.800000000 then +if $data02 != 26.800000000 then return -1 endi @@ -376,78 +376,78 @@ sql select c1, c2, c3 from ct1 print ===> select c1, c2, c3 from ct1 print ===> rows: $rows print ===> rows0: $data00 $data01 $data02 $data03 $data04 -if $rows != 12 then +if $rows != 12 then return -1 endi -if $data00 != 10 then +if $data00 != 10 then return -1 endi -if $data01 != 2.00000 then +if $data01 != 2.00000 then return -1 endi -if $data02 != 3.000000000 then +if $data02 != 3.000000000 then return -1 endi -if $data10 != NULL then +if $data10 != NULL then return -1 endi -if $data11 != NULL then +if $data11 != NULL then return -1 endi -if $data12 != NULL then +if $data12 != NULL then return -1 endi -if $data30 != 11 then +if $data30 != 11 then return -1 endi -if $data31 != NULL then +if $data31 != NULL then return -1 endi -if $data32 != 3.200000000 then +if $data32 != 3.200000000 then return -1 endi -if $data90 != 16 then +if $data90 != 16 then return -1 endi -if $data91 != 2.60000 then +if $data91 != 2.60000 then return -1 endi -if $data92 != 3.600000000 then +if $data92 != 3.600000000 then return -1 endi #=================================================================== print =============== query data from stb sql select * from stb -print ===> +print ===> print ===> rows: $rows print ===> rows0: $data00 $data01 $data02 $data03 $data04 -if $rows != 12 then +if $rows != 12 then return -1 endi print =============== select count(*) from supter table sql select count(*) from stb print $data00 $data01 $data02 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data00 != 12 then +if $data00 != 12 then return -1 endi print =============== select count(column) from supter table sql select count(ts), count(c1), count(c2), count(c3) from stb print $data00 $data01 $data02 $data03 -if $data00 != 12 then +if $data00 != 12 then return -1 endi -if $data01 != 8 then +if $data01 != 8 then return -1 endi -if $data02 != 8 then +if $data02 != 8 then return -1 endi -if $data03 != 8 then +if $data03 != 8 then return -1 endi diff --git a/tests/script/tsim/insert/query_block1_file.sim b/tests/script/tsim/insert/query_block1_file.sim index c6bda6d0610f377ae11dc45b757f97216e3ded70..8d2d2664d7ec83a685d856577054d0ce4c78a527 100644 --- a/tests/script/tsim/insert/query_block1_file.sim +++ b/tests/script/tsim/insert/query_block1_file.sim @@ -16,36 +16,36 @@ sql create database $db sql use $db sql create table $tb (ts timestamp, speed int) -#commit to file will trigger if insert 82 rows +#commit to file will trigger if insert 82 rows -$N = 82 +$N = 82 print =============== step 1 $x = $N $y = $N / 2 while $x > $y $ms = $x . m - $xt = - . $x - sql insert into $tb values (now - $ms , -$x ) + $xt = - . $x + sql insert into $tb values (now - $ms , -$x ) $x = $x - 1 -endw +endw sql select * from $tb -print sql select * from $tb -> $rows points -if $rows != $y then +print sql select * from $tb -> $rows points +if $rows != $y then return -1 endi $x = $N / 2 $y = $N while $x < $y - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 -endw +endw sql select * from $tb -print sql select * from $tb -> $rows points -if $rows != $N then +print sql select * from $tb -> $rows points +if $rows != $N then return -1 endi @@ -53,18 +53,18 @@ print =============== step 2 $R = 4 $x = $N * 2 -$y = $N * $R +$y = $N * $R $expect = $y + $N $y = $y + $x while $x < $y - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 -endw +endw sql select * from $tb -print sql select * from $tb -> $rows points -if $rows != $expect then +print sql select * from $tb -> $rows points +if $rows != $expect then return -1 endi @@ -100,7 +100,7 @@ endi sql select * from $tb where ts < $start2 and ts > $end1 print select * from $tb where ts < $start2 and ts > $end1 -> $rows points -if $rows != $result1 then +if $rows != $result1 then return -1 endi @@ -115,23 +115,23 @@ if $rows != 0 then endi sql select * from $tb where ts < $start3 and ts > $end1 -print sql select * from $tb where ts < $start3 and ts > $end1 -> $rows points -if $rows != $result2 then +print sql select * from $tb where ts < $start3 and ts > $end1 -> $rows points +if $rows != $result2 then return -1 endi sql select * from $tb where ts < $start3 and ts > $end2 -print sql select * from $tb where ts < $start3 and ts > $end2 -> $rows points -if $rows != $result1 then +print sql select * from $tb where ts < $start3 and ts > $end2 -> $rows points +if $rows != $result1 then return -1 endi -sql select * from $tb where ts < $start3 and ts > $end3 +sql select * from $tb where ts < $start3 and ts > $end3 if $rows != 0 then return -1 endi -print ================= order by ts desc +print ================= order by ts desc sql select * from $tb where ts < $start1 and ts > $end1 order by ts desc if $rows != 0 then @@ -148,9 +148,9 @@ if $rows != 0 then return -1 endi -sql select * from $tb where ts < $start2 and ts > $end1 order by ts desc +sql select * from $tb where ts < $start2 and ts > $end1 order by ts desc print select * from $tb where ts < $start2 and ts > $end1 order by ts desc -> $rows points -if $rows != $result1 then +if $rows != $result1 then return -1 endi @@ -164,15 +164,15 @@ if $rows != 0 then return -1 endi -sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc -print sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc -> $rows points -if $rows != $result2 then +sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc +print sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc -> $rows points +if $rows != $result2 then return -1 endi -sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc -print sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc -> $rows points -if $rows != $result1 then +sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc +print sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc -> $rows points +if $rows != $result1 then return -1 endi @@ -185,8 +185,8 @@ clear: sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/insert/query_block1_memory.sim b/tests/script/tsim/insert/query_block1_memory.sim index 110255bd90c4c9e4e361b14dfd09497baf352653..168f0e19c9bd94f66a0ed1dcdcb18fcedd35becf 100644 --- a/tests/script/tsim/insert/query_block1_memory.sim +++ b/tests/script/tsim/insert/query_block1_memory.sim @@ -17,9 +17,9 @@ sql use $db sql create table $tb (ts timestamp, speed int) -#commit to file will trigger if insert 82 rows +#commit to file will trigger if insert 82 rows -$N = 82 +$N = 82 print =============== step 1 $x = $N @@ -28,14 +28,14 @@ while $x > $y $z = $x * 60000 $ms = 1601481600000 - $z - $xt = - . $x - sql insert into $tb values ($ms , -$x ) + $xt = - . $x + sql insert into $tb values ($ms , -$x ) $x = $x - 1 -endw +endw sql select * from $tb -print sql select * from $tb -> $rows points -if $rows != $y then +print sql select * from $tb -> $rows points +if $rows != $y then return -1 endi @@ -45,12 +45,12 @@ while $x < $y $z = $x * 60000 $ms = 1601481600000 + $z - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 -endw +endw sql select * from $tb -print sql select * from $tb -> $rows points -if $rows != $N then +print sql select * from $tb -> $rows points +if $rows != $N then return -1 endi @@ -69,100 +69,100 @@ $end2 = 1601481600000 $end3 = 1601481600000 + $step sql select * from $tb where ts < $start1 and ts > $end1 -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts < $start1 and ts > $end2 -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts < $start1 and ts > $end3 -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts < $start2 and ts > $end1 print select * from $tb where ts < $start2 and ts > $end1 -> $rows points -if $rows != $result1 then +if $rows != $result1 then return -1 endi sql select * from $tb where ts < $start2 and ts > $end2 -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts < $start2 and ts > $end3 -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts < $start3 and ts > $end1 -print sql select * from $tb where ts < $start3 and ts > $end1 -> $rows points -if $rows != $result2 then +print sql select * from $tb where ts < $start3 and ts > $end1 -> $rows points +if $rows != $result2 then return -1 endi sql select * from $tb where ts < $start3 and ts > $end2 -print sql select * from $tb where ts < $start3 and ts > $end2 -> $rows points -if $rows != $result1 then +print sql select * from $tb where ts < $start3 and ts > $end2 -> $rows points +if $rows != $result1 then return -1 endi sql select * from $tb where ts < $start3 and ts > $end3 -if $rows != 0 then +if $rows != 0 then return -1 endi -print ================= order by ts desc +print ================= order by ts desc sql select * from $tb where ts < $start1 and ts > $end1 order by ts desc -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts < $start1 and ts > $end2 order by ts desc -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts < $start1 and ts > $end3 order by ts desc -if $rows != 0 then +if $rows != 0 then return -1 endi -sql select * from $tb where ts < $start2 and ts > $end1 order by ts desc +sql select * from $tb where ts < $start2 and ts > $end1 order by ts desc print select * from $tb where ts < $start2 and ts > $end1 order by ts desc -> $rows points -if $rows != $result1 then +if $rows != $result1 then return -1 endi sql select * from $tb where ts < $start2 and ts > $end2 order by ts desc -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts < $start2 and ts > $end3 order by ts desc -if $rows != 0 then +if $rows != 0 then return -1 endi -sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc -print sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc -> $rows points -if $rows != $result2 then +sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc +print sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc -> $rows points +if $rows != $result2 then return -1 endi -sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc -print sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc -> $rows points -if $rows != $result1 then +sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc +print sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc -> $rows points +if $rows != $result1 then return -1 endi sql select * from $tb where ts < $start3 and ts > $end3 order by ts desc -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -170,8 +170,8 @@ clear: sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/insert/query_block2_file.sim b/tests/script/tsim/insert/query_block2_file.sim index c87262ab143b3cac842196576f14dcce6fd84c2c..804051c44ac4adc7be3fea896a017d6ebb513b56 100644 --- a/tests/script/tsim/insert/query_block2_file.sim +++ b/tests/script/tsim/insert/query_block2_file.sim @@ -16,23 +16,23 @@ sql create database $db sql use $db sql create table $tb (ts timestamp, speed int) -#commit to file will trigger if insert 82 rows -$N = 82 +#commit to file will trigger if insert 82 rows +$N = 82 print =============== step 1 $x = $N * 2 $y = $N $expect = $N while $x > $y - $ms = $x . m - $xt = - . $x - sql insert into $tb values (now - $ms , $xt ) + $ms = $x . m + $xt = - . $x + sql insert into $tb values (now - $ms , $xt ) $x = $x - 1 -endw +endw sql select * from $tb -print sql select * from $tb -> $rows points -if $rows != $expect then +print sql select * from $tb -> $rows points +if $rows != $expect then return -1 endi @@ -40,20 +40,20 @@ $x = $N $y = $N * 2 $expect = $N * 2 while $x < $y - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 -endw +endw sql select * from $tb -print sql select * from $tb -> $rows points -if $rows != $expect then +print sql select * from $tb -> $rows points +if $rows != $expect then return -1 endi print =============== step 2 $R = 4 -$y = $N * $R +$y = $N * $R $expect = $y + $N $expect = $expect + $N @@ -62,17 +62,17 @@ $x = $N * 3 $y = $y + $x while $x < $y - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 -endw +endw sql select * from $tb -print sql select * from $tb -> $rows points -if $rows != $expect then +print sql select * from $tb -> $rows points +if $rows != $expect then return -1 endi - + print =============== step 2 @@ -106,7 +106,7 @@ endi sql select * from $tb where ts < $start2 and ts > $end1 print select * from $tb where ts < $start2 and ts > $end1 -> $rows points -if $rows != $result1 then +if $rows != $result1 then return -1 endi @@ -121,14 +121,14 @@ if $rows != 0 then endi sql select * from $tb where ts < $start3 and ts > $end1 -print sql select * from $tb where ts < $start3 and ts > $end1 -> $rows points -if $rows != $result2 then +print sql select * from $tb where ts < $start3 and ts > $end1 -> $rows points +if $rows != $result2 then return -1 endi sql select * from $tb where ts < $start3 and ts > $end2 -print sql select * from $tb where ts < $start3 and ts > $end2 -> $rows points -if $rows != $result1 then +print sql select * from $tb where ts < $start3 and ts > $end2 -> $rows points +if $rows != $result1 then return -1 endi @@ -137,7 +137,7 @@ if $rows != 0 then return -1 endi -print ================= order by ts desc +print ================= order by ts desc sql select * from $tb where ts < $start1 and ts > $end1 order by ts desc if $rows != 0 then @@ -154,9 +154,9 @@ if $rows != 0 then return -1 endi -sql select * from $tb where ts < $start2 and ts > $end1 order by ts desc +sql select * from $tb where ts < $start2 and ts > $end1 order by ts desc print select * from $tb where ts < $start2 and ts > $end1 order by ts desc -> $rows points -if $rows != $result1 then +if $rows != $result1 then return -1 endi @@ -170,15 +170,15 @@ if $rows != 0 then return -1 endi -sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc -print sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc -> $rows points -if $rows != $result2 then +sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc +print sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc -> $rows points +if $rows != $result2 then return -1 endi -sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc -print sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc -> $rows points -if $rows != $result1 then +sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc +print sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc -> $rows points +if $rows != $result1 then return -1 endi @@ -191,8 +191,8 @@ clear: sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/insert/query_block2_memory.sim b/tests/script/tsim/insert/query_block2_memory.sim index f919a2a61f2c03c344e7fe53cf90d43ba43881cd..2504e905b79bd53b87ac245bb2f8d4d1f46d7d4c 100644 --- a/tests/script/tsim/insert/query_block2_memory.sim +++ b/tests/script/tsim/insert/query_block2_memory.sim @@ -14,22 +14,22 @@ sql drop database -x step1 step1: sql create database $db sql use $db -sql create table $tb (ts timestamp, speed int) +sql create table $tb (ts timestamp, speed int) -$N = 82 +$N = 82 $x = $N * 2 $y = $N while $x > $y - $ms = $x . m - $xt = - . $x - sql insert into $tb values (now - $ms , $xt ) + $ms = $x . m + $xt = - . $x + sql insert into $tb values (now - $ms , $xt ) $x = $x - 1 -endw +endw sql select * from $tb -print sql select * from $tb -> $rows points -if $rows != $y then +print sql select * from $tb -> $rows points +if $rows != $y then return -1 endi @@ -37,13 +37,13 @@ $x = $N $y = $N * 2 $expect = $N * 2 while $x < $y - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 -endw +endw sql select * from $tb -print sql select * from $tb -> $rows points -if $rows != $expect then +print sql select * from $tb -> $rows points +if $rows != $expect then return -1 endi @@ -63,107 +63,107 @@ $end2 = now $end3 = now+ . $step sql select * from $tb where ts < $start1 and ts > $end1 -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts < $start1 and ts > $end2 -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts < $start1 and ts > $end3 -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts < $start2 and ts > $end1 print select * from $tb where ts < $start2 and ts > $end1 -> $rows points -if $rows != $result1 then +if $rows != $result1 then return -1 endi sql select * from $tb where ts < $start2 and ts > $end2 -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts < $start2 and ts > $end3 -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts < $start3 and ts > $end1 print sql select * from $tb where ts < $start3 and ts > $end1 -> $rows points -if $rows != $result2 then +if $rows != $result2 then return -1 endi sql select * from $tb where ts < $start3 and ts > $end2 -print sql select * from $tb where ts < $start3 and ts > $end2 -> $rows points -if $rows != $result1 then +print sql select * from $tb where ts < $start3 and ts > $end2 -> $rows points +if $rows != $result1 then return -1 endi sql select * from $tb where ts < $start3 and ts > $end3 -if $rows != 0 then +if $rows != 0 then return -1 endi -print ================= order by ts desc +print ================= order by ts desc sql select * from $tb where ts < $start1 and ts > $end1 order by ts desc -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts < $start1 and ts > $end2 order by ts desc -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts < $start1 and ts > $end3 order by ts desc -if $rows != 0 then +if $rows != 0 then return -1 endi -sql select * from $tb where ts < $start2 and ts > $end1 order by ts desc +sql select * from $tb where ts < $start2 and ts > $end1 order by ts desc print select * from $tb where ts < $start2 and ts > $end1 order by ts desc -> $rows points -if $rows != $result1 then +if $rows != $result1 then return -1 endi sql select * from $tb where ts < $start2 and ts > $end2 order by ts desc -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts < $start2 and ts > $end3 order by ts desc -if $rows != 0 then +if $rows != 0 then return -1 endi -sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc -print sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc -> $rows points -if $rows != $result2 then +sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc +print sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc -> $rows points +if $rows != $result2 then return -1 endi -sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc -print sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc -> $rows points -if $rows != $result1 then +sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc +print sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc -> $rows points +if $rows != $result1 then return -1 endi sql select * from $tb where ts < $start3 and ts > $end3 order by ts desc -if $rows != 0 then +if $rows != 0 then return -1 endi sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/insert/query_file_memory.sim b/tests/script/tsim/insert/query_file_memory.sim index 0d37484494e9dadc98b4f2aea1e2956d09b5c7de..325d7977b65173190718365c2f753a69fb52ebbe 100644 --- a/tests/script/tsim/insert/query_file_memory.sim +++ b/tests/script/tsim/insert/query_file_memory.sim @@ -17,23 +17,23 @@ sql use $db sql create table $tb (ts timestamp, speed int) -#commit to file will trigger if insert 82 rows +#commit to file will trigger if insert 82 rows -$N = 82 +$N = 82 $x = $N * 2 $y = $N $expect = $y while $x > $y - $ms = $x . m - $xt = - . $x - sql insert into $tb values (now - $ms , $xt ) + $ms = $x . m + $xt = - . $x + sql insert into $tb values (now - $ms , $xt ) $x = $x - 1 -endw +endw sql select * from $tb -print sql select * from $tb -> $rows points -if $rows != $expect then +print sql select * from $tb -> $rows points +if $rows != $expect then return -1 endi @@ -41,37 +41,37 @@ $x = $N $y = $N * 2 $expect = $N * 2 while $x < $y - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 -endw +endw sql select * from $tb -print sql select * from $tb -> $rows points -if $rows != $expect then +print sql select * from $tb -> $rows points +if $rows != $expect then return -1 endi $R = 4 $R = $R - 1 -$y = $N * $R +$y = $N * $R $expect = $y + $N $expect = $expect + $N $x = $N * 3 $y = $y + $x while $x < $y - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 -endw +endw sql select * from $tb -print sql select * from $tb -> $rows points -if $rows != $expect then +print sql select * from $tb -> $rows points +if $rows != $expect then return -1 endi - + print =============== step 2 @@ -105,7 +105,7 @@ endi sql select * from $tb where ts < $start2 and ts > $end1 print select * from $tb where ts < $start2 and ts > $end1 -> $rows points -if $rows != $result1 then +if $rows != $result1 then return -1 endi @@ -120,14 +120,14 @@ if $rows != 0 then endi sql select * from $tb where ts < $start3 and ts > $end1 -print sql select * from $tb where ts < $start3 and ts > $end1 -> $rows points -if $rows != $result2 then +print sql select * from $tb where ts < $start3 and ts > $end1 -> $rows points +if $rows != $result2 then return -1 endi sql select * from $tb where ts < $start3 and ts > $end2 -print sql select * from $tb where ts < $start3 and ts > $end2 -> $rows points -if $rows != $result1 then +print sql select * from $tb where ts < $start3 and ts > $end2 -> $rows points +if $rows != $result1 then return -1 endi @@ -136,7 +136,7 @@ if $rows != 0 then return -1 endi -print ================= order by ts desc +print ================= order by ts desc sql select * from $tb where ts < $start1 and ts > $end1 order by ts desc if $rows != 0 then @@ -153,9 +153,9 @@ if $rows != 0 then return -1 endi -sql select * from $tb where ts < $start2 and ts > $end1 order by ts desc +sql select * from $tb where ts < $start2 and ts > $end1 order by ts desc print select * from $tb where ts < $start2 and ts > $end1 order by ts desc -> $rows points -if $rows != $result1 then +if $rows != $result1 then return -1 endi @@ -169,15 +169,15 @@ if $rows != 0 then return -1 endi -sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc -print sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc -> $rows points -if $rows != $result2 then +sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc +print sql select * from $tb where ts < $start3 and ts > $end1 order by ts desc -> $rows points +if $rows != $result2 then return -1 endi -sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc -print sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc -> $rows points -if $rows != $result1 then +sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc +print sql select * from $tb where ts < $start3 and ts > $end2 order by ts desc -> $rows points +if $rows != $result1 then return -1 endi @@ -190,8 +190,8 @@ clear: sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/insert/query_multi_file.sim b/tests/script/tsim/insert/query_multi_file.sim index 750eb040293b381cc98cd38da76931d93a1c7617..1b3ad57c8e47c0eaf702be6e3c2f4a10c5d6f1a0 100644 --- a/tests/script/tsim/insert/query_multi_file.sim +++ b/tests/script/tsim/insert/query_multi_file.sim @@ -15,18 +15,18 @@ step1: sql create database $db sql use $db -sql create table $tb (ts timestamp, speed int) +sql create table $tb (ts timestamp, speed int) $N = 20000 $x = 0 while $x < $N - $ms = $x . s + $ms = $x . s #print insert into $tb values (now + $ms , $x ) sql insert into $tb values (now + $ms , $x ) -x error_insert $x = $x + 1 -endw +endw error_insert: sql select * from $tb @@ -37,8 +37,8 @@ endi sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/insert/tcp.sim b/tests/script/tsim/insert/tcp.sim index 2dc720a0d455840831e70966515096bc9471e039..7eb06e82fb9921450e684813210c514387a3e3bf 100644 --- a/tests/script/tsim/insert/tcp.sim +++ b/tests/script/tsim/insert/tcp.sim @@ -21,8 +21,8 @@ while $x < 10000 $ms = $x . s sql insert into tb values (now + $ms , '1' ) $x = $x + 1 -endw +endw sql select * from tb -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/insert/update0.sim b/tests/script/tsim/insert/update0.sim index c4bd29615bd7b0ca14d8b0d67588618ad29fd945..6384b5a21f060796d68de57dcf4f65da6ae74cd9 100644 --- a/tests/script/tsim/insert/update0.sim +++ b/tests/script/tsim/insert/update0.sim @@ -11,7 +11,7 @@ print =============== create super table sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)); sql show stables -if $rows != 1 then +if $rows != 1 then return -1 endi @@ -20,7 +20,7 @@ sql create table ct1 using stb tags("BeiJing", "ChaoYang") sql create table ct2 using stb tags("BeiJing", "HaiDian") sql show tables -if $rows != 2 then +if $rows != 2 then return -1 endi @@ -43,7 +43,7 @@ print $data30 $data31 print $data40 $data41 print $data50 $data51 -if $rows != 6 then +if $rows != 6 then print rows $rows != 6 return -1 endi @@ -74,7 +74,7 @@ print $data00 $data01 print $data10 $data11 print $data20 $data21 -if $rows != 3 then +if $rows != 3 then print rows $rows != 3 return -1 endi @@ -107,7 +107,7 @@ print $data30 $data31 print $data40 $data41 print $data50 $data51 -if $rows != 6 then +if $rows != 6 then print rows $rows != 6 return -1 endi @@ -133,7 +133,7 @@ print $data00 $data01 print $data10 $data11 print $data20 $data21 -if $rows != 3 then +if $rows != 3 then print rows $rows != 3 return -1 endi @@ -166,7 +166,7 @@ print $data30 $data31 print $data40 $data41 print $data50 $data51 -if $rows != 6 then +if $rows != 6 then print rows $rows != 6 return -1 endi @@ -198,7 +198,7 @@ print $data20 $data21 print $data30 $data31 print $data40 $data41 -if $rows != 5 then +if $rows != 5 then print rows $rows != 5 return -1 endi @@ -228,4 +228,4 @@ if $data41 != NULL then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/insert/update1_sort_merge.sim b/tests/script/tsim/insert/update1_sort_merge.sim index 13462520eaf173e5df0badcb6430152ea1a05321..5a69a01acbd96707ee917ac49d67c5c2376796fe 100644 --- a/tests/script/tsim/insert/update1_sort_merge.sim +++ b/tests/script/tsim/insert/update1_sort_merge.sim @@ -13,7 +13,7 @@ print =============== create super table sql create table if not exists stb (ts timestamp, c1 int unsigned, c2 double, c3 binary(10), c4 nchar(10), c5 double) tags (city binary(20),district binary(20)); sql show stables -if $rows != 1 then +if $rows != 1 then return -1 endi @@ -24,7 +24,7 @@ sql create table ct3 using stb tags("BeiJing", "PingGu") sql create table ct4 using stb tags("BeiJing", "YanQing") sql show tables -if $rows != 4 then +if $rows != 4 then print rows $rows != 4 return -1 endi @@ -69,7 +69,7 @@ print $data30 $data31 $data32 $data33 $data34 $data35 print $data40 $data41 $data42 $data43 $data44 $data45 print $data50 $data51 $data52 $data53 $data54 $data55 -if $rows != 6 then +if $rows != 6 then print rows $rows != 6 return -1 endi @@ -154,7 +154,7 @@ print =============== step 6 query records of ct2 from memory(taosc and taosd me sql select * from ct2; print $data00 $data01 $data02 $data03 $data04 $data05 -if $rows != 1 then +if $rows != 1 then print rows $rows != 1 return -1 endi @@ -196,12 +196,12 @@ print $data60 $data61 $data62 $data63 $data64 $data65 print $data70 $data71 $data72 $data73 $data74 $data75 print $data80 $data81 $data82 $data83 $data84 $data85 print $data90 $data91 $data92 $data93 $data94 $data95 -print $data[10][0] $data[10][1] $data[10][2] $data[10][3] $data[10][4] $data[10][5] -print $data[11][0] $data[11][1] $data[11][2] $data[11][3] $data[11][4] $data[11][5] -print $data[12][0] $data[12][1] $data[12][2] $data[12][3] $data[12][4] $data[12][5] -print $data[13][0] $data[13][1] $data[13][2] $data[13][3] $data[13][4] $data[13][5] +print $data[10][0] $data[10][1] $data[10][2] $data[10][3] $data[10][4] $data[10][5] +print $data[11][0] $data[11][1] $data[11][2] $data[11][3] $data[11][4] $data[11][5] +print $data[12][0] $data[12][1] $data[12][2] $data[12][3] $data[12][4] $data[12][5] +print $data[13][0] $data[13][1] $data[13][2] $data[13][3] $data[13][4] $data[13][5] -if $rows != 14 then +if $rows != 14 then print rows $rows != 14 return -1 endi @@ -356,7 +356,7 @@ print $data30 $data31 $data32 $data33 $data34 $data35 print $data40 $data41 $data42 $data43 $data44 $data45 -if $rows != 5 then +if $rows != 5 then print rows $rows != 5 return -1 endi @@ -451,7 +451,7 @@ print $data30 $data31 $data32 $data33 $data34 $data35 print $data40 $data41 $data42 $data43 $data44 $data45 print $data50 $data51 $data52 $data53 $data54 $data55 -if $rows != 6 then +if $rows != 6 then print rows $rows != 6 return -1 endi @@ -536,7 +536,7 @@ print =============== step 10 query records of ct2 from file sql select * from ct2; print $data00 $data01 $data02 $data03 $data04 $data05 -if $rows != 1 then +if $rows != 1 then print rows $rows != 1 return -1 endi @@ -578,12 +578,12 @@ print $data60 $data61 $data62 $data63 $data64 $data65 print $data70 $data71 $data72 $data73 $data74 $data75 print $data80 $data81 $data82 $data83 $data84 $data85 print $data90 $data91 $data92 $data93 $data94 $data95 -print $data[10][0] $data[10][1] $data[10][2] $data[10][3] $data[10][4] $data[10][5] -print $data[11][0] $data[11][1] $data[11][2] $data[11][3] $data[11][4] $data[11][5] -print $data[12][0] $data[12][1] $data[12][2] $data[12][3] $data[12][4] $data[12][5] -print $data[13][0] $data[13][1] $data[13][2] $data[13][3] $data[13][4] $data[13][5] +print $data[10][0] $data[10][1] $data[10][2] $data[10][3] $data[10][4] $data[10][5] +print $data[11][0] $data[11][1] $data[11][2] $data[11][3] $data[11][4] $data[11][5] +print $data[12][0] $data[12][1] $data[12][2] $data[12][3] $data[12][4] $data[12][5] +print $data[13][0] $data[13][1] $data[13][2] $data[13][3] $data[13][4] $data[13][5] -if $rows != 14 then +if $rows != 14 then print rows $rows != 14 return -1 endi @@ -738,7 +738,7 @@ print $data30 $data31 $data32 $data33 $data34 $data35 print $data40 $data41 $data42 $data43 $data44 $data45 -if $rows != 5 then +if $rows != 5 then print rows $rows != 5 return -1 endi @@ -818,4 +818,4 @@ if $data44 != n8 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/parser/alter1.sim b/tests/script/tsim/parser/alter1.sim index 9d0049e45e5437d9d6de814b744d8fce3ccd876e..369419dcd9cd91688f39c27dbd54c33ee0699ae8 100644 --- a/tests/script/tsim/parser/alter1.sim +++ b/tests/script/tsim/parser/alter1.sim @@ -130,4 +130,4 @@ endi # return -1 #endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/parser/binary_escapeCharacter.sim b/tests/script/tsim/parser/binary_escapeCharacter.sim index 0b437d8b04a39a400b25368263f88c2b846c155a..5a9c0e7bb1d2b141639a1408ffcc4ae064dd78f8 100644 --- a/tests/script/tsim/parser/binary_escapeCharacter.sim +++ b/tests/script/tsim/parser/binary_escapeCharacter.sim @@ -101,4 +101,4 @@ sql_error insert into tb values(now, '\'); #sql_error insert into tb values(now, '\\\n'); sql insert into tb values(now, '\n'); -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/parser/col_arithmetic_operation.sim b/tests/script/tsim/parser/col_arithmetic_operation.sim index f22beefdf88c3d90bff8554cc44b5768bfef3d1e..9a2ba34c85e552585770bb42913b8c83ddd58131 100644 --- a/tests/script/tsim/parser/col_arithmetic_operation.sim +++ b/tests/script/tsim/parser/col_arithmetic_operation.sim @@ -132,4 +132,4 @@ sql_error select max(c1-c2) from $tb print =====================> td-1764 sql select sum(c1)/count(*), sum(c1) as b, count(*) as b from $stb interval(1y) -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/parser/columnValue_bigint.sim b/tests/script/tsim/parser/columnValue_bigint.sim index 2cf0151a053f3c62b7ab904156361c8705dc554e..0a024029a534232aa5829d0cf59807f8c65d71f2 100644 --- a/tests/script/tsim/parser/columnValue_bigint.sim +++ b/tests/script/tsim/parser/columnValue_bigint.sim @@ -373,7 +373,7 @@ sql_error insert into st_bigint_e7 values (now, "123abc") sql_error insert into st_bigint_e9 values (now, abc) sql_error insert into st_bigint_e10 values (now, "abc") sql_error insert into st_bigint_e11 values (now, " ") -sql insert into st_bigint_e12 values (now, '') +sql_error insert into st_bigint_e12 values (now, '') sql_error insert into st_bigint_e13 using mt_bigint tags (033) values (now, 9223372036854775808) sql insert into st_bigint_e14 using mt_bigint tags (033) values (now, -9223372036854775808) @@ -386,7 +386,7 @@ sql_error insert into st_bigint_e20 using mt_bigint tags (033) values (now, "123 sql_error insert into st_bigint_e22 using mt_bigint tags (033) values (now, abc) sql_error insert into st_bigint_e23 using mt_bigint tags (033) values (now, "abc") sql_error insert into st_bigint_e24 using mt_bigint tags (033) values (now, " ") -sql insert into st_bigint_e25 using mt_bigint tags (033) values (now, '') +sql_error insert into st_bigint_e25 using mt_bigint tags (033) values (now, '') sql_error insert into st_bigint_e13_0 using mt_bigint tags (9223372036854775808) values (now, -033) sql insert into st_bigint_e14_0 using mt_bigint tags (-9223372036854775808) values (now, -033) diff --git a/tests/script/tsim/parser/columnValue_double.sim b/tests/script/tsim/parser/columnValue_double.sim index da09b77353fc12f237e4fb94dc99b69bd8bec9c1..bfcf338faca3669b18b2e821f10b5e3b4b0f328e 100644 --- a/tests/script/tsim/parser/columnValue_double.sim +++ b/tests/script/tsim/parser/columnValue_double.sim @@ -476,7 +476,7 @@ sql_error insert into st_double_e7 values (now, "123abc") sql_error insert into st_double_e9 values (now, abc) sql_error insert into st_double_e10 values (now, "abc") sql_error insert into st_double_e11 values (now, " ") -sql insert into st_double_e12 values (now, '') +sql_error insert into st_double_e12 values (now, '') sql_error insert into st_double_e13 using mt_double tags (033) values (now, 11.7976931348623157e+308) sql_error insert into st_double_e14 using mt_double tags (033) values (now, -11.7976931348623157e+308) @@ -489,7 +489,7 @@ sql_error insert into st_double_e20 using mt_double tags (033) values (now, "123 sql_error insert into st_double_e22 using mt_double tags (033) values (now, abc) sql_error insert into st_double_e23 using mt_double tags (033) values (now, "abc") sql_error insert into st_double_e24 using mt_double tags (033) values (now, " ") -sql insert into st_double_e25_1 using mt_double tags (033) values (now, '') +sql_error insert into st_double_e25_1 using mt_double tags (033) values (now, '') sql_error insert into st_double_e13 using mt_double tags (31.7976931348623157e+308) values (now, -033) sql_error insert into st_double_e14 using mt_double tags (-31.7976931348623157e+308) values (now, -033) diff --git a/tests/script/tsim/parser/columnValue_float.sim b/tests/script/tsim/parser/columnValue_float.sim index 3e20e178c373b9bf55b4be0e666e27e3d5787447..b2db7dff2b875d8839e1bdf356c40ba4d6d9e916 100644 --- a/tests/script/tsim/parser/columnValue_float.sim +++ b/tests/script/tsim/parser/columnValue_float.sim @@ -506,7 +506,7 @@ sql_error insert into st_float_e7 values (now, "123abc") sql_error insert into st_float_e9 values (now, abc) sql_error insert into st_float_e10 values (now, "abc") sql_error insert into st_float_e11 values (now, " ") -sql insert into st_float_e12 values (now, '') +sql_error insert into st_float_e12 values (now, '') sql_error insert into st_float_e13 using mt_float tags (033) values (now, 3.50282347e+38) sql_error insert into st_float_e14 using mt_float tags (033) values (now, -3.50282347e+38) @@ -519,7 +519,7 @@ sql_error insert into st_float_e20 using mt_float tags (033) values (now, "123ab sql_error insert into st_float_e22 using mt_float tags (033) values (now, abc) sql_error insert into st_float_e23 using mt_float tags (033) values (now, "abc") sql_error insert into st_float_e24 using mt_float tags (033) values (now, " ") -sql insert into st_float_e25_1 using mt_float tags (033) values (now, '') +sql_error insert into st_float_e25_1 using mt_float tags (033) values (now, '') sql_error insert into st_float_e13 using mt_float tags (3.50282347e+38) values (now, -033) sql_error insert into st_float_e14 using mt_float tags (-3.50282347e+38) values (now, -033) diff --git a/tests/script/tsim/parser/columnValue_int.sim b/tests/script/tsim/parser/columnValue_int.sim index 009fbd1ede718245b6145b9a292d9c325d2a872c..4a3b8ebd0ba46a08ee737de44cf07c1cb10aa555 100644 --- a/tests/script/tsim/parser/columnValue_int.sim +++ b/tests/script/tsim/parser/columnValue_int.sim @@ -371,7 +371,7 @@ sql_error insert into st_int_e7 values (now, "123abc") sql_error insert into st_int_e9 values (now, abc) sql_error insert into st_int_e10 values (now, "abc") sql_error insert into st_int_e11 values (now, " ") -sql insert into st_int_e12 values (now, '') +sql_error insert into st_int_e12 values (now, '') sql_error insert into st_int_e13 using mt_int tags (033) values (now, 2147483648) sql insert into st_int_e14 using mt_int tags (033) values (now, -2147483648) @@ -384,7 +384,7 @@ sql_error insert into st_int_e20 using mt_int tags (033) values (now, "123abc") sql_error insert into st_int_e22 using mt_int tags (033) values (now, abc) sql_error insert into st_int_e23 using mt_int tags (033) values (now, "abc") sql_error insert into st_int_e24 using mt_int tags (033) values (now, " ") -sql insert into st_int_e25 using mt_int tags (033) values (now, '') +sql_error insert into st_int_e25 using mt_int tags (033) values (now, '') sql_error insert into st_int_e13 using mt_int tags (2147483648) values (now, -033) sql insert into st_int_e14_1 using mt_int tags (-2147483648) values (now, -033) diff --git a/tests/script/tsim/parser/columnValue_smallint.sim b/tests/script/tsim/parser/columnValue_smallint.sim index 0dcb0d85f4f9dad62dc71aa7fd2df916c6fd7a63..eb364f36302df811549a968eaf5bf05a823b84b3 100644 --- a/tests/script/tsim/parser/columnValue_smallint.sim +++ b/tests/script/tsim/parser/columnValue_smallint.sim @@ -374,7 +374,7 @@ sql_error insert into st_smallint_e7 values (now, "123abc") sql_error insert into st_smallint_e9 values (now, abc) sql_error insert into st_smallint_e10 values (now, "abc") sql_error insert into st_smallint_e11 values (now, " ") -sql insert into st_smallint_e12 values (now, '') +sql_error insert into st_smallint_e12 values (now, '') sql_error insert into st_smallint_e13 using mt_smallint tags (033) values (now, 32768) sql insert into st_smallint_e14_1 using mt_smallint tags (033) values (now, -32768) @@ -387,7 +387,7 @@ sql_error insert into st_smallint_e20 using mt_smallint tags (033) values (now, sql_error insert into st_smallint_e22 using mt_smallint tags (033) values (now, abc) sql_error insert into st_smallint_e23 using mt_smallint tags (033) values (now, "abc") sql_error insert into st_smallint_e24 using mt_smallint tags (033) values (now, " ") -sql insert into st_smallint_e25_1 using mt_smallint tags (033) values (now, '') +sql_error insert into st_smallint_e25_1 using mt_smallint tags (033) values (now, '') sql_error insert into st_smallint_e13 using mt_smallint tags (32768) values (now, -033) sql insert into st_smallint_e14 using mt_smallint tags (-32768) values (now, -033) diff --git a/tests/script/tsim/parser/columnValue_tinyint.sim b/tests/script/tsim/parser/columnValue_tinyint.sim index 62ae4e5228f94e466dd0bc44e681b7b4b940fcdb..d7938aa739fb3584c8fd549005bd02e44ac39bc0 100644 --- a/tests/script/tsim/parser/columnValue_tinyint.sim +++ b/tests/script/tsim/parser/columnValue_tinyint.sim @@ -372,7 +372,7 @@ sql_error insert into st_tinyint_e7 values (now, "123abc") sql_error insert into st_tinyint_e9 values (now, abc) sql_error insert into st_tinyint_e10 values (now, "abc") sql_error insert into st_tinyint_e11 values (now, " ") -sql insert into st_tinyint_e12 values (now, '') +sql_error insert into st_tinyint_e12 values (now, '') sql_error insert into st_tinyint_e13 using mt_tinyint tags (033) values (now, 128) sql insert into st_tinyint_e14_1 using mt_tinyint tags (033) values (now, -128) @@ -385,7 +385,7 @@ sql_error insert into st_tinyint_e20 using mt_tinyint tags (033) values (now, "1 sql_error insert into st_tinyint_e22 using mt_tinyint tags (033) values (now, abc) sql_error insert into st_tinyint_e23 using mt_tinyint tags (033) values (now, "abc") sql_error insert into st_tinyint_e24 using mt_tinyint tags (033) values (now, " ") -sql insert into st_tinyint_e25_2 using mt_tinyint tags (033) values (now, '') +sql_error insert into st_tinyint_e25_2 using mt_tinyint tags (033) values (now, '') sql_error insert into st_tinyint_e13 using mt_tinyint tags (128) values (now, -033) sql insert into st_tinyint_e14 using mt_tinyint tags (-128) values (now, -033) diff --git a/tests/script/tsim/parser/columnValue_unsign.sim b/tests/script/tsim/parser/columnValue_unsign.sim index 758814bc2b662998f5074dc36dbf45cf67ae41d7..7ae1b20eca18236c71277ae2c94a0976181a271a 100644 --- a/tests/script/tsim/parser/columnValue_unsign.sim +++ b/tests/script/tsim/parser/columnValue_unsign.sim @@ -76,17 +76,16 @@ if $data03 != NULL then return -1 endi -sql insert into mt_unsigned_1 values(now, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); -sql insert into mt_unsigned_1 values(now+1s, 1, 2, 3, 4, 5, 6, 7, 8, 9); - -sql_error insert into mt_unsigned_1 values(now, -1, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); -sql_error insert into mt_unsigned_1 values(now, NULL, -1, NULL, NULL, NULL, NULL, NULL, NULL, NULL); -sql_error insert into mt_unsigned_1 values(now, NULL, NULL, -1, NULL, NULL, NULL, NULL, NULL, NULL); -sql_error insert into mt_unsigned_1 values(now, NULL, NULL, NULL, -1, NULL, NULL, NULL, NULL, NULL); -sql insert into mt_unsigned_1 values(now, 255, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); -sql insert into mt_unsigned_1 values(now, NULL, 65535, NULL, NULL, NULL, NULL, NULL, NULL, NULL); -sql insert into mt_unsigned_1 values(now, NULL, NULL, 4294967295, NULL, NULL, NULL, NULL, NULL, NULL); -sql insert into mt_unsigned_1 values(now, NULL, NULL, NULL, 18446744073709551615, NULL, NULL, NULL, NULL, NULL); +sql insert into mt_unsigned_1 values(now+1s, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); +sql insert into mt_unsigned_1 values(now+2s, 1, 2, 3, 4, 5, 6, 7, 8, 9); +sql_error insert into mt_unsigned_1 values(now+3s, -1, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); +sql_error insert into mt_unsigned_1 values(now+4s, NULL, -1, NULL, NULL, NULL, NULL, NULL, NULL, NULL); +sql_error insert into mt_unsigned_1 values(now+5s, NULL, NULL, -1, NULL, NULL, NULL, NULL, NULL, NULL); +sql_error insert into mt_unsigned_1 values(now+6s, NULL, NULL, NULL, -1, NULL, NULL, NULL, NULL, NULL); +sql insert into mt_unsigned_1 values(now+7s, 255, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); +sql insert into mt_unsigned_1 values(now+8s, NULL, 65535, NULL, NULL, NULL, NULL, NULL, NULL, NULL); +sql insert into mt_unsigned_1 values(now+9s, NULL, NULL, 4294967295, NULL, NULL, NULL, NULL, NULL, NULL); +sql insert into mt_unsigned_1 values(now+10s, NULL, NULL, NULL, 18446744073709551615, NULL, NULL, NULL, NULL, NULL); sql select count(a),count(b),count(c),count(d), count(e) from mt_unsigned_1 if $rows != 1 then @@ -130,4 +129,4 @@ if $rows != 1 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/parser/fill.sim b/tests/script/tsim/parser/fill.sim index f688d815e79fb76ce536fd75a1312230306dda41..4892345e12ed4b22a1c3d96ae2e6233e7e9fe642 100644 --- a/tests/script/tsim/parser/fill.sim +++ b/tests/script/tsim/parser/fill.sim @@ -47,31 +47,10 @@ $tsu = $tsu + $ts0 ## fill syntax test # number of fill values exceeds number of selected columns -sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) -if $data11 != 6 then - return -1 -endi -if $data12 != 6 then - return -1 -endi -if $data13 != 6.00000 then - return -1 -endi -if $data14 != 6.000000000 then - return -1 -endi +sql_error select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) # number of fill values is smaller than number of selected columns -sql select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6) -if $data11 != 6 then - return -1 -endi -if $data12 != 6 then - return -1 -endi -if $data13 != 6.00000 then - return -1 -endi +sql_error select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6) # unspecified filling method sql_error select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6) @@ -182,7 +161,7 @@ endi # min_with_fill print min_with_fill -sql select _wstart, min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +sql select _wstart, min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6) if $rows != 9 then return -1 endi @@ -216,7 +195,7 @@ endi # first_with_fill print first_with_fill -sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, '6', '6') if $rows != 9 then return -1 endi @@ -305,7 +284,7 @@ endi # last_with_fill print last_with_fill -sql select _wstart, last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +sql select _wstart, last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, '6', '6') if $rows != 9 then return -1 endi @@ -351,7 +330,7 @@ if $data11 != -1 then endi # fill_char_values_to_arithmetic_fields -sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c') +sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c') # fill_multiple_columns sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc) @@ -376,37 +355,25 @@ endi # fill_into_nonarithmetic_fieds print select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000) -sql select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000) -if $data01 != 1 then - return -1 -endi -if $data11 != NULL then - return -1 -endi +sql_error select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000) -sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1) -sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1) -sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1) +sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1) +sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1) +sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1) sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1') # fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24 # fill values into binary or nchar columns will be set to NULL automatically Note:2018-10-24 sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1') -sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true) +sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true) sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true') # fill nonarithmetic values into arithmetic fields sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc); -sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true'); +sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true'); print select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1'); -sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1'); -if $rows != 9 then - return -1 -endi -if $data01 != 1 then - return -1 -endi +sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1'); sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1); if $rows != 9 then @@ -416,13 +383,7 @@ if $data01 != 1 then return -1 endi -sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10'); -if $rows != 9 then - return -1 -endi -if $data01 != 1 then - return -1 -endi +sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10'); ## linear fill # feature currently switched off 2018/09/29 @@ -859,8 +820,8 @@ sql insert into tm0 values('2020-1-1 1:3:8', 8); sql insert into tm0 values('2020-1-1 1:3:9', 9); sql insert into tm0 values('2020-1-1 1:4:10', 10); -print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85); -sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85); +print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90); +sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90); if $rows != 8 then return -1 endi @@ -958,14 +919,14 @@ if $data12 != NULL then return -1 endi -sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 4:2:15' interval(500a) fill(value, 99,91,90,89,88,87,86,85) ; +sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 4:2:15' interval(500a) fill(value, 99,91,90) ; if $rows != 21749 then print expect 21749, actual: $rows return -1 endi -print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85) ; -sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85) ; +print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89) ; +sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89) ; if $rows != 8 then return -1 endi diff --git a/tests/script/tsim/parser/fill_stb.sim b/tests/script/tsim/parser/fill_stb.sim index 51ae6f4b4153f59356a73edbcf7002c774fd8c05..6c61631aa8b3a682b75317943ddeb3642720f588 100644 --- a/tests/script/tsim/parser/fill_stb.sim +++ b/tests/script/tsim/parser/fill_stb.sim @@ -136,7 +136,8 @@ if $data74 != -4.00000 then endi ## fill(value) + group by -sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2, -3, -4, -5, -6, -7, -8) group by t1 +print select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1, -2, -3, -4, -5, -6, -7, -8) +sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1, -2, -3, -4, -5, -6, -7, -8) $val = $rowNum * 2 print $rowNum, $val @@ -148,18 +149,13 @@ if $rows != 190 then print expect 190, actual:$rows return -1 endi -if $data06 != 0 then - return -1 -endi if $data11 != -1 then return -1 endi -#if $data16 != 0 then -# return -1 -#endi # number of fill values is smaller than number of selected columns -sql select max(c1), max(c2), max(c3) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6) +print select _wstart, max(c1), max(c2), max(c3) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6) +sql select _wstart, max(c1), max(c2), max(c3) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6) if $data11 != 6 then return -1 endi @@ -174,11 +170,11 @@ endi sql_error select max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6) # fill_char_values_to_arithmetic_fields -sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c') +sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c') # fill_multiple_columns sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc) -sql select sum(c1), avg(c2), min(c3), max(c4) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99) +sql select _wstart, sum(c1), avg(c2), min(c3), max(c4) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99) $val = $rowNum * 2 $val = $val - 1 if $rows != $val then @@ -196,11 +192,14 @@ sql select * from $stb if $data09 != nchar0 then return -1 endi -sql select max(c4) from $stb where t1 > 4 and ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1) group by t1 -if $rows != 0 then - return -1 -endi -sql select min(c1), max(c4) from $stb where t1 > 4 and ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1) + +print select max(c4) from $stb where t1 > 4 and ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1) +sql select max(c4) from $stb where t1 > 4 and ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1) +#if $rows != 0 then +# return -1 +#endi + +sql select _wstart, min(c1), max(c4) from $stb where t1 > 4 and ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1) $val = $rowNum * 2 $val = $val - 1 if $rows != $val then @@ -223,11 +222,12 @@ if $data12 != -1.000000000 then endi # fill_into_nonarithmetic_fieds -sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000) +print select _wstart, first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000) +sql select _wstart, first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000) #if $data11 != 20000000 then -if $data11 != 1 then - return -1 -endi +#if $data11 != 1 then +# return -1 +#endi sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1) sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1) @@ -235,16 +235,15 @@ sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= sql select first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1') # fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24 # fill values into binary or nchar columns will be set to NULL automatically Note:2018-10-24 -sql_error select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1') +sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1') sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true) sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true') - # fill nonarithmetic values into arithmetic fields sql_error select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc); -sql_error select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true'); +sql select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true'); -sql select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '2e1'); +sql select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '2e1'); $val = $rowNum * 2 $val = $val - 1 if $rows != $val then @@ -253,11 +252,11 @@ endi if $data01 != $rowNum then return -1 endi -if $data11 != 20 then - return -1 -endi +#if $data11 != 20 then +# return -1 +#endi -sql select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 2e1); +sql select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 2e1); if $rows != $val then return -1 endi @@ -268,43 +267,44 @@ if $data11 != 20 then return -1 endi -sql select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '20'); +sql select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '20'); if $rows != $val then return -1 endi if $data01 != $rowNum then return -1 endi -if $data11 != 20 then - return -1 -endi +#if $data11 != 20 then +# return -1 +#endi ## linear fill -sql select max(c1), min(c2), avg(c3), sum(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(linear) group by t1 +sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(linear) $val = $rowNum * 2 $val = $val - 1 $val = $val * $tbNum if $rows != $val then return -1 endi -if $data08 != 0 then - return -1 -endi -if $data15 != NULL then - return -1 -endi -if $data16 != NULL then - return -1 -endi -if $data17 != NULL then - return -1 -endi -if $data18 != 0 then - return -1 -endi +#if $data08 != 0 then +# return -1 +#endi +#if $data15 != NULL then +# return -1 +#endi +#if $data16 != NULL then +# return -1 +#endi +#if $data17 != NULL then +# return -1 +#endi +#if $data18 != 0 then +# return -1 +#endi ## [TBASE-365] -sql select max(c1), min(c2), avg(c3), sum(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 interval(5m) fill(linear) group by t1 +sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(linear) +print select _wstart, max(c1), min(c2), avg(c3), sum(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(linear) if $rows != 95 then return -1 endi @@ -332,14 +332,8 @@ endi if $data17 != NULL then return -1 endi -if $data08 != 5 then - return -1 -endi -if $data18 != 5 then - return -1 -endi -sql select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(linear) +sql select _wstart, max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(linear) $val = $rowNum * 2 $val = $val - 1 if $rows != $val then @@ -359,7 +353,8 @@ endi ## previous fill print fill(prev) -sql select max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 interval(5m) fill(prev) group by t1 limit 5 +print select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(prev) limit 5 +sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(prev) limit 5 if $rows != 25 then return -1 endi @@ -372,69 +367,43 @@ endi if $data04 != NULL then return -1 endi -if $data09 != 5 then - return -1 -endi if $data12 != NULL then return -1 endi -if $data19 != 5 then - return -1 -endi if $data18 != nchar0 then return -1 endi -if $data59 != 6 then - return -1 -endi -if $data69 != 6 then - return -1 -endi ## NULL fill print fill(NULL) -sql select max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 interval(5m) fill(value, NULL) group by t1 limit 5 +print select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(value, NULL) limit 5 +sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(value, NULL) limit 5 if $rows != 25 then return -1 endi if $data01 != 0 then return -1 endi -if $data02 != NULL then - return -1 -endi -if $data04 != NULL then +if $data02 != 0 then return -1 endi if $data06 != 1 then return -1 endi -if $data09 != 5 then +if $data11 != 0 then return -1 endi -if $data11 != NULL then - return -1 -endi -if $data12 != NULL then - return -1 -endi -if $data19 != 5 then +if $data12 != 0 then return -1 endi if $data18 != NULL then return -1 endi -if $data59 != 6 then - return -1 -endi -if $data69 != 6 then - return -1 -endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/tsim/parser/fill_us.sim b/tests/script/tsim/parser/fill_us.sim index 0a45c02f58a039baa22d5c71fff04d8e56a6fed6..f760ba3577281fa358e0da9180624b7de2e69b76 100644 --- a/tests/script/tsim/parser/fill_us.sim +++ b/tests/script/tsim/parser/fill_us.sim @@ -48,32 +48,11 @@ $tsu = $tsu + $ts0 ## fill syntax test # number of fill values exceeds number of selected columns print select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) -sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) -if $data11 != 6 then - return -1 -endi -if $data12 != 6 then - return -1 -endi -if $data13 != 6.00000 then - return -1 -endi -if $data14 != 6.000000000 then - return -1 -endi +sql_error select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) # number of fill values is smaller than number of selected columns print sql select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6) -sql select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6) -if $data11 != 6 then - return -1 -endi -if $data12 != 6 then - return -1 -endi -if $data13 != 6.00000 then - return -1 -endi +sql_error select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6) # unspecified filling method sql_error select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6) @@ -185,7 +164,7 @@ endi # min_with_fill print min_with_fill -sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6) if $rows != 9 then return -1 endi @@ -219,7 +198,7 @@ endi # first_with_fill print first_with_fill -sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, '6', '6') if $rows != 9 then return -1 endi @@ -308,7 +287,7 @@ endi # last_with_fill print last_with_fill -sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6) +sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, '6', '6') if $rows != 9 then return -1 endi @@ -353,7 +332,7 @@ if $data11 != -1 then endi # fill_char_values_to_arithmetic_fields -sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c') +sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c') # fill_multiple_columns sql_error select _wstart, sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc) @@ -379,34 +358,24 @@ endi # fill_into_nonarithmetic_fieds -sql select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000) -#if $data11 != 20000000 then -if $data11 != NULL then - return -1 -endi +sql_error select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000) -sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1) -sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1) -sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1) +sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1) +sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1) +sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1) sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1') # fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24 # fill values into binary or nchar columns will be set to null automatically Note:2018-10-24 sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1') -sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true) +sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true) sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true') # fill nonarithmetic values into arithmetic fields sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc); -sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true'); +sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true'); -sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1'); -if $rows != 9 then - return -1 -endi -if $data01 != 1 then - return -1 -endi +sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1'); sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1); if $rows != 9 then @@ -416,14 +385,7 @@ if $data01 != 1 then return -1 endi -sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10'); -if $rows != 9 then - return -1 -endi -if $data01 != 1 then - return -1 -endi - +sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10'); ## linear fill # feature currently switched off 2018/09/29 diff --git a/tests/script/tsim/parser/function.sim b/tests/script/tsim/parser/function.sim index 0219a84c646eea8aecc5dee6223bb29f4df5b44f..0002a5d09555083569fc7a6ff32a376e56746dfe 100644 --- a/tests/script/tsim/parser/function.sim +++ b/tests/script/tsim/parser/function.sim @@ -70,6 +70,7 @@ if $data00 != @15-08-18 00:00:00.000@ then return -1 endi if $data01 != 2.068333156 then + print expect 2.068333156, actual: $data01 return -1 endi if $data02 != 2.063999891 then @@ -128,6 +129,7 @@ if $data03 != 2 then return -1 endi if $data11 != 2.077099980 then + print expect 2.077099980, actual: $data11 return -1 endi if $data12 != 2.077000022 then diff --git a/tests/script/tsim/parser/groupby.sim b/tests/script/tsim/parser/groupby.sim index 12a698b1ccb2273d10c1831948103ab88f494d54..4ee9c530a79c72ccac12a99922af1eeefc7485ed 100644 --- a/tests/script/tsim/parser/groupby.sim +++ b/tests/script/tsim/parser/groupby.sim @@ -557,7 +557,7 @@ if $data10 != @{slop:0.000000, intercept:1.000000}@ then return -1 endi -if $data90 != @{slop:0.000000, intercept:9.000000}@ then +if $data90 != @{slop:0.000000, intercept:17.000000}@ then return -1 endi diff --git a/tests/script/tsim/parser/import_file.sim b/tests/script/tsim/parser/import_file.sim index e031e0249dd5a3b9efec7b9fed2505671f645e2c..37dc0c447623a8ea54f8d0e7228e38749e7a41be 100644 --- a/tests/script/tsim/parser/import_file.sim +++ b/tests/script/tsim/parser/import_file.sim @@ -69,4 +69,4 @@ endi system rm -f $inFileName -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/parser/repeatAlter.sim b/tests/script/tsim/parser/repeatAlter.sim index d28a03e193a031ee95b5d237481de8ed31651877..b4012048cc314682e6bdb971a8e4a97fb1c2ca65 100644 --- a/tests/script/tsim/parser/repeatAlter.sim +++ b/tests/script/tsim/parser/repeatAlter.sim @@ -6,4 +6,4 @@ while $i <= $loops $i = $i + 1 endw -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/parser/select_from_cache_disk.sim b/tests/script/tsim/parser/select_from_cache_disk.sim index 0983e36a3a579f88bdb429e9ad62a67c4fe6823b..3c0b13c6388c2386da011b2576262b65a6f018d5 100644 --- a/tests/script/tsim/parser/select_from_cache_disk.sim +++ b/tests/script/tsim/parser/select_from_cache_disk.sim @@ -60,4 +60,4 @@ if $data12 != 1 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/parser/single_row_in_tb.sim b/tests/script/tsim/parser/single_row_in_tb.sim index 1bd53ad24ef17c89bf5bfd1ddec9ed78b969cf9a..e7b4c9a871b4d8409a8a1624ff83b71fb77a77c2 100644 --- a/tests/script/tsim/parser/single_row_in_tb.sim +++ b/tests/script/tsim/parser/single_row_in_tb.sim @@ -33,4 +33,4 @@ print ================== server restart completed run tsim/parser/single_row_in_tb_query.sim -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/parser/single_row_in_tb_query.sim b/tests/script/tsim/parser/single_row_in_tb_query.sim index 422756b798cbeb1b3e70d14d952457df0e54a202..37e193f9d202c0f94748342f3a67e1565e8490d3 100644 --- a/tests/script/tsim/parser/single_row_in_tb_query.sim +++ b/tests/script/tsim/parser/single_row_in_tb_query.sim @@ -195,4 +195,4 @@ endi print ===============>safty check TD-4927 sql select first(ts, c1) from sr_stb where ts<1 group by t1; -sql select first(ts, c1) from sr_stb where ts>0 and ts<1; \ No newline at end of file +sql select first(ts, c1) from sr_stb where ts>0 and ts<1; diff --git a/tests/script/tsim/parser/slimit_query.sim b/tests/script/tsim/parser/slimit_query.sim index 1e04a31099b0a9d948d1fd5fff229b0db940390c..acf0489d3c667834f630b41977240da86dcf4cfd 100644 --- a/tests/script/tsim/parser/slimit_query.sim +++ b/tests/script/tsim/parser/slimit_query.sim @@ -93,25 +93,25 @@ if $rows != 3 then endi ### slimit + fill -sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 5 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 0 offset 0 +sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 5 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 0 offset 0 if $rows != 0 then return -1 endi -sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 2 offset 0 -print select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 2 offset 0 +sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 2 offset 0 +print select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 2 offset 0 print $rows $data00 $data01 $data02 $data03 if $rows != 8 then return -1 endi # desc -sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 2 offset 0 +sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 2 offset 0 if $rows != 8 then return -1 endi -sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 2 offset 598 +sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 2 offset 598 if $rows != 4 then return -1 endi diff --git a/tests/script/tsim/parser/timestamp_query.sim b/tests/script/tsim/parser/timestamp_query.sim index 6e92dbcb3ab28518dc452e474aee955a3003c596..24058cbc84912033b41f49b3e05ee2fecbe4d221 100644 --- a/tests/script/tsim/parser/timestamp_query.sim +++ b/tests/script/tsim/parser/timestamp_query.sim @@ -28,7 +28,7 @@ sql select * from ts_stb0 where ts <> $ts0 ##### select from supertable $tb = $tbPrefix . 0 -sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, -1) +sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, -1, -1) $res = $rowNum * 2 $n = $res - 2 print ============>$n @@ -47,7 +47,7 @@ if $data13 != 598.000000000 then return -1 endi -sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, NULL) +sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, NULL, NULL) if $data13 != 598.000000000 then print expect 598.000000000, actual $data03 return -1 diff --git a/tests/script/tsim/query/complex_group.sim b/tests/script/tsim/query/complex_group.sim index 3dad8059cd148504118d56a63f60b25247dc0fb6..d7d14c0ee82b3e10e06f509b4e6a7821be9c901f 100644 --- a/tests/script/tsim/query/complex_group.sim +++ b/tests/script/tsim/query/complex_group.sim @@ -454,4 +454,4 @@ if $rows != 1 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/query/complex_having.sim b/tests/script/tsim/query/complex_having.sim index 9e28c3803e373e1d973b34c39573b4a7ec4f13f3..4c0af6d10c2d796638be619c6092618217b01257 100644 --- a/tests/script/tsim/query/complex_having.sim +++ b/tests/script/tsim/query/complex_having.sim @@ -365,4 +365,4 @@ if $rows != 1 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/query/complex_limit.sim b/tests/script/tsim/query/complex_limit.sim index 2a90e7ff1d1f1a4ba25f79a94339219f3d4f5683..acb133f6504f8076161476cfcf6b8f73493157fc 100644 --- a/tests/script/tsim/query/complex_limit.sim +++ b/tests/script/tsim/query/complex_limit.sim @@ -508,4 +508,4 @@ if $rows != 1 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/query/complex_select.sim b/tests/script/tsim/query/complex_select.sim index f4c9877bfd4c32622238cf21eafac8c35aaafa19..b7697e5cab0e654a40dd16f55f57cfbba4c5653e 100644 --- a/tests/script/tsim/query/complex_select.sim +++ b/tests/script/tsim/query/complex_select.sim @@ -558,4 +558,4 @@ if $data00 != 33 then endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/query/complex_where.sim b/tests/script/tsim/query/complex_where.sim index bda1c036f02ded7953f8049a46318479b5feb106..847f67ed3461a88c16e1697386f8ee0d6f91d438 100644 --- a/tests/script/tsim/query/complex_where.sim +++ b/tests/script/tsim/query/complex_where.sim @@ -669,4 +669,4 @@ if $rows != 1 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/query/crash_sql.sim b/tests/script/tsim/query/crash_sql.sim index 1d20491869db719c84065fb6a765268c7366c80b..79a9165e6602b1e8b1931e0f3ad9bf7d0168450f 100644 --- a/tests/script/tsim/query/crash_sql.sim +++ b/tests/script/tsim/query/crash_sql.sim @@ -76,7 +76,7 @@ sql insert into ct4 values ( '2022-05-21 01:01:01.000', NULL, NULL, NULL, NULL, print ================ start query ====================== -print ================ SQL used to cause taosd or taos shell crash +print ================ SQL used to cause taosd or TDengine CLI crash sql_error select sum(c1) ,count(c1) from ct4 group by c1 having sum(c10) between 0 and 1 ; -#system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +#system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/query/diff.sim b/tests/script/tsim/query/diff.sim index f0d82b01e92bdffc06f951a5d3911ae4338037d9..badd139a9f7b25aa4192e3f97b0cefe825efc597 100644 --- a/tests/script/tsim/query/diff.sim +++ b/tests/script/tsim/query/diff.sim @@ -25,17 +25,17 @@ $i = 0 while $i < $tbNum $tb = $tbPrefix . $i sql create table $tb using $mt tags( $i ) - + $x = 0 while $x < $rowNum $cc = $x * 60000 $ms = 1601481600000 + $cc - sql insert into $tb values ($ms , $x ) + sql insert into $tb values ($ms , $x ) $x = $x + 1 - endw - + endw + $i = $i + 1 -endw +endw sleep 100 @@ -61,7 +61,7 @@ sql select _rowts, diff(tbcol) from $tb where ts > $ms print ===> rows: $rows print ===> $data00 $data01 $data02 $data03 $data04 $data05 print ===> $data10 $data11 $data12 $data13 $data14 $data15 -if $data11 != 1 then +if $data11 != 1 then return -1 endi @@ -72,7 +72,7 @@ sql select _rowts, diff(tbcol) from $tb where ts <= $ms print ===> rows: $rows print ===> $data00 $data01 $data02 $data03 $data04 $data05 print ===> $data10 $data11 $data12 $data13 $data14 $data15 -if $data11 != 1 then +if $data11 != 1 then return -1 endi @@ -82,7 +82,7 @@ sql select _rowts, diff(tbcol) as b from $tb print ===> rows: $rows print ===> $data00 $data01 $data02 $data03 $data04 $data05 print ===> $data10 $data11 $data12 $data13 $data14 $data15 -if $data11 != 1 then +if $data11 != 1 then return -1 endi @@ -107,4 +107,4 @@ if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/query/explain.sim b/tests/script/tsim/query/explain.sim index 40635dbfd370a77259c10f879b873e4e8ed87d35..2871252d91b822e02911931bf2c8a848472a5e9d 100644 --- a/tests/script/tsim/query/explain.sim +++ b/tests/script/tsim/query/explain.sim @@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/exec.sh -n dnode1 -s start sql connect -print ======== step1 +print ======== step1 sql create database db1 vgroups 3; sql use db1; sql select * from information_schema.ins_databases; @@ -30,7 +30,7 @@ sql insert into tb4 values (now, 4, "Bitmap Heap Scan on tenk1 t1 (cost=5.07..2 #sql insert into tb4 values (now, 4, "Bitmap Heap Scan on tenk1 t1 (cost=5.07..229.20 rows=101 width=244) (actual time=0.080..0.526 rows=100 loops=1)"); -print ======== step2 +print ======== step2 sql explain select * from st1 where -2; sql explain select ts from tb1; sql explain select * from st1; @@ -41,14 +41,14 @@ sql explain select count(*),sum(f1) from st1; sql explain select count(*),sum(f1) from st1 group by f1; #sql explain select count(f1) from tb1 interval(10s, 2s) sliding(3s) fill(prev); -print ======== step3 +print ======== step3 sql explain verbose true select * from st1 where -2; sql explain verbose true select ts from tb1 where f1 > 0; sql explain verbose true select * from st1 where f1 > 0 and ts > '2020-10-31 00:00:00' and ts < '2021-10-31 00:00:00'; sql explain verbose true select count(*) from st1 partition by tbname slimit 1 soffset 2 limit 2 offset 1; sql explain verbose true select * from information_schema.ins_stables where db_name='db2'; -print ======== step4 +print ======== step4 sql explain analyze select ts from st1 where -2; sql explain analyze select ts from tb1; sql explain analyze select ts from st1; @@ -59,7 +59,7 @@ sql explain analyze select count(*),sum(f1) from tb1; sql explain analyze select count(*),sum(f1) from st1; sql explain analyze select count(*),sum(f1) from st1 group by f1; -print ======== step5 +print ======== step5 sql explain analyze verbose true select ts from st1 where -2; sql explain analyze verbose true select ts from tb1; sql explain analyze verbose true select ts from st1; @@ -74,6 +74,7 @@ sql explain analyze verbose true select ts from tb1 where f1 > 0; sql explain analyze verbose true select f1 from st1 where f1 > 0 and ts > '2020-10-31 00:00:00' and ts < '2021-10-31 00:00:00'; sql explain analyze verbose true select * from information_schema.ins_stables where db_name='db2'; sql explain analyze verbose true select * from (select min(f1),count(*) a from st1 where f1 > 0) where a < 0; +sql explain analyze verbose true select count(f1) from st1 group by tbname; #not pass case #sql explain verbose true select count(*),sum(f1) as aa from tb1 where (f1 > 0 or f1 < -1) and ts > '2020-10-31 00:00:00' and ts < '2021-10-31 00:00:00' order by aa; @@ -86,12 +87,12 @@ sql explain analyze verbose true select * from (select min(f1),count(*) a from s #sql explain select * from tb1, tb2 where tb1.ts=tb2.ts; #sql explain select * from st1, st2 where tb1.ts=tb2.ts; #sql explain analyze verbose true select sum(a+b) from (select _rowts, min(f1) b,count(*) a from st1 where f1 > 0 interval(1a)) where a < 0 interval(1s); -#sql explain select min(f1) from st1 interval(1m, 2a) sliding(30s); +#sql explain select min(f1) from st1 interval(1m, 2a) sliding(30s); #sql explain verbose true select count(*),sum(f1) from st1 where f1 > 0 and ts > '2021-10-31 00:00:00' group by f1 having sum(f1) > 0; -#sql explain analyze select min(f1) from st1 interval(3m, 2a) sliding(1m); +#sql explain analyze select min(f1) from st1 interval(3m, 2a) sliding(1m); #sql explain analyze select count(f1) from tb1 interval(10s, 2s) sliding(3s) fill(prev); #sql explain analyze verbose true select count(*),sum(f1) from st1 where f1 > 0 and ts > '2021-10-31 00:00:00' group by f1 having sum(f1) > 0; -#sql explain analyze verbose true select min(f1) from st1 interval(3m, 2a) sliding(1m); +#sql explain analyze verbose true select min(f1) from st1 interval(3m, 2a) sliding(1m); system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/query/interval.sim b/tests/script/tsim/query/interval.sim index cc8a73daec1ad54fb1448480b0efd317bbd09be9..833da4a8ba2b3daf495167f06c99d222564a6bf3 100644 --- a/tests/script/tsim/query/interval.sim +++ b/tests/script/tsim/query/interval.sim @@ -177,4 +177,4 @@ print =============== clear # return -1 #endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/query/scalarFunction.sim b/tests/script/tsim/query/scalarFunction.sim index 103e66e54e674c10e3fbe3bd88e044ffe7d0041d..1b8115fec635832116f722ce1fb22810d817a0b7 100644 --- a/tests/script/tsim/query/scalarFunction.sim +++ b/tests/script/tsim/query/scalarFunction.sim @@ -33,7 +33,7 @@ print =============== create normal table sql create table ntb (ts timestamp, c1 int, c2 float, c3 double) sql show tables -if $rows != 101 then +if $rows != 101 then return -1 endi @@ -444,7 +444,7 @@ if $loop_test == 0 then print =============== stop and restart taosd system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s start - + $loop_cnt = 0 check_dnode_ready_0: $loop_cnt = $loop_cnt + 1 @@ -462,7 +462,7 @@ if $loop_test == 0 then goto check_dnode_ready_0 endi - $loop_test = 1 + $loop_test = 1 goto loop_test_pos endi diff --git a/tests/script/tsim/query/scalarNull.sim b/tests/script/tsim/query/scalarNull.sim index ec95c94f23c12babb06b25b06ce140c9a4a5368a..6abe3d62d9b1aaf88872054c5bd040098400debb 100644 --- a/tests/script/tsim/query/scalarNull.sim +++ b/tests/script/tsim/query/scalarNull.sim @@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/exec.sh -n dnode1 -s start sql connect -print ======== step1 +print ======== step1 sql create database db1 vgroups 3; sql use db1; sql select * from information_schema.ins_databases; diff --git a/tests/script/tsim/query/session.sim b/tests/script/tsim/query/session.sim index 158448d76537947d1f6a0fb8d9569becc33fcdd8..b6eb4ed3aa2aae6873eed4fb0c8056c95ebe6bb6 100644 --- a/tests/script/tsim/query/session.sim +++ b/tests/script/tsim/query/session.sim @@ -35,8 +35,8 @@ sql INSERT INTO dev_001 VALUES('2020-05-13 13:00:00.001', 12) sql INSERT INTO dev_001 VALUES('2020-05-14 13:00:00.001', 13) sql INSERT INTO dev_001 VALUES('2020-05-15 14:00:00.000', 14) sql INSERT INTO dev_001 VALUES('2020-05-20 10:00:00.000', 15) -sql INSERT INTO dev_001 VALUES('2020-05-27 10:00:00.001', 16) - +sql INSERT INTO dev_001 VALUES('2020-05-27 10:00:00.001', 16) + sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.000', 1) sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.005', 2) sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.009', 3) @@ -46,7 +46,7 @@ sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.036', 6) sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.51', 7) # vnode does not return the precision of the table -print ====> create database d1 precision 'us' +print ====> create database d1 precision 'us' sql create database d1 precision 'us' sql use d1 sql create table dev_001 (ts timestamp ,i timestamp ,j int) @@ -54,7 +54,7 @@ sql insert into dev_001 values(1623046993681000,now,1)(1623046993681001,now+1s,2 sql create table secondts(ts timestamp,t2 timestamp,i int) sql insert into secondts values(1623046993681000,now,1)(1623046993681001,now+1s,2)(1623046993681002,now+2s,3)(1623046993681004,now+5s,4) -$loop_test = 0 +$loop_test = 0 loop_test_pos: sql use $dbNamme @@ -299,7 +299,7 @@ if $loop_test == 0 then print =============== stop and restart taosd system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s start - + $loop_cnt = 0 check_dnode_ready_0: $loop_cnt = $loop_cnt + 1 @@ -317,7 +317,7 @@ if $loop_test == 0 then goto check_dnode_ready_0 endi - $loop_test = 1 + $loop_test = 1 goto loop_test_pos endi diff --git a/tests/script/tsim/query/stddev.sim b/tests/script/tsim/query/stddev.sim index d61c7273e19ebee84cd0117a9faf163c3a854005..b45c7d80a3edd8319f199e07fd607ab4f474df23 100644 --- a/tests/script/tsim/query/stddev.sim +++ b/tests/script/tsim/query/stddev.sim @@ -409,4 +409,4 @@ if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/query/time_process.sim b/tests/script/tsim/query/time_process.sim index b3c0e9561f149445a7ae75036736bbf6f8eaf4a4..83a64458465d6d978a38a206b2a7b223cb2bf45d 100644 --- a/tests/script/tsim/query/time_process.sim +++ b/tests/script/tsim/query/time_process.sim @@ -111,4 +111,4 @@ if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/query/udf.sim index 7cc1403bcb215547209b1c41dcf0351f9fc80bfd..7f8b1044ef528a3a771946f878167b1123ddd9db 100644 --- a/tests/script/tsim/query/udf.sim +++ b/tests/script/tsim/query/udf.sim @@ -9,7 +9,7 @@ system sh/cfg.sh -n dnode1 -c udf -v 1 system sh/exec.sh -n dnode1 -s start sql connect -print ======== step1 udf +print ======== step1 udf system sh/compile_udf.sh sql create database udf vgroups 3; sql use udf; diff --git a/tests/script/tsim/scalar/filter.sim b/tests/script/tsim/scalar/filter.sim new file mode 100644 index 0000000000000000000000000000000000000000..923166227856189e91848150ed9e848f946b066d --- /dev/null +++ b/tests/script/tsim/scalar/filter.sim @@ -0,0 +1,38 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ======== step1 +sql drop database if exists db1; +sql create database db1 vgroups 3; +sql use db1; +sql create stable st1 (fts timestamp, fbool bool, ftiny tinyint, fsmall smallint, fint int, fbig bigint, futiny tinyint unsigned, fusmall smallint unsigned, fuint int unsigned, fubig bigint unsigned, ffloat float, fdouble double, fbin binary(10), fnchar nchar(10)) tags(tts timestamp, tbool bool, ttiny tinyint, tsmall smallint, tint int, tbig bigint, tutiny tinyint unsigned, tusmall smallint unsigned, tuint int unsigned, tubig bigint unsigned, tfloat float, tdouble double, tbin binary(10), tnchar nchar(10)); +sql create table tb1 using st1 tags('2022-07-10 16:31:00', true, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a'); +sql create table tb2 using st1 tags('2022-07-10 16:32:00', false, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); +sql create table tb3 using st1 tags('2022-07-10 16:33:00', true, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c'); + +sql insert into tb1 values ('2022-07-10 16:31:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a'); +sql insert into tb1 values ('2022-07-10 16:31:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); +sql insert into tb1 values ('2022-07-10 16:31:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c'); +sql insert into tb1 values ('2022-07-10 16:31:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd'); +sql insert into tb1 values ('2022-07-10 16:31:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e'); + +sql insert into tb2 values ('2022-07-10 16:32:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a'); +sql insert into tb2 values ('2022-07-10 16:32:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); +sql insert into tb2 values ('2022-07-10 16:32:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c'); +sql insert into tb2 values ('2022-07-10 16:32:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd'); +sql insert into tb2 values ('2022-07-10 16:32:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e'); + +sql insert into tb3 values ('2022-07-10 16:33:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a'); +sql insert into tb3 values ('2022-07-10 16:33:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); +sql insert into tb3 values ('2022-07-10 16:33:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c'); +sql insert into tb3 values ('2022-07-10 16:33:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd'); +sql insert into tb3 values ('2022-07-10 16:33:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e'); + +sql select * from st1 where (ttiny > 2 or ftiny < 5) and ftiny > 2; +if $rows != 7 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/scalar/in.sim b/tests/script/tsim/scalar/in.sim index 60c12a00c2a2ddbbe4210700b976970b767c03c4..75e1face88a9d837f9f668599f92496aedfaf410 100644 --- a/tests/script/tsim/scalar/in.sim +++ b/tests/script/tsim/scalar/in.sim @@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/exec.sh -n dnode1 -s start sql connect -print ======== step1 +print ======== step1 sql drop database if exists db1; sql create database db1 vgroups 3; sql use db1; @@ -11,24 +11,24 @@ sql create stable st1 (fts timestamp, fbool bool, ftiny tinyint, fsmall smallint sql create table tb1 using st1 tags('2022-07-10 16:31:00', true, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a'); sql create table tb2 using st1 tags('2022-07-10 16:32:00', false, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); sql create table tb3 using st1 tags('2022-07-10 16:33:00', true, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c'); - + sql insert into tb1 values ('2022-07-10 16:31:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a'); sql insert into tb1 values ('2022-07-10 16:31:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); sql insert into tb1 values ('2022-07-10 16:31:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c'); sql insert into tb1 values ('2022-07-10 16:31:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd'); sql insert into tb1 values ('2022-07-10 16:31:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e'); - + sql insert into tb2 values ('2022-07-10 16:32:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a'); -sql insert into tb2 values ('2022-07-10 16:32:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); +sql insert into tb2 values ('2022-07-10 16:32:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); sql insert into tb2 values ('2022-07-10 16:32:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c'); -sql insert into tb2 values ('2022-07-10 16:32:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd'); +sql insert into tb2 values ('2022-07-10 16:32:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd'); sql insert into tb2 values ('2022-07-10 16:32:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e'); - + sql insert into tb3 values ('2022-07-10 16:33:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a'); -sql insert into tb3 values ('2022-07-10 16:33:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); +sql insert into tb3 values ('2022-07-10 16:33:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); sql insert into tb3 values ('2022-07-10 16:33:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c'); -sql insert into tb3 values ('2022-07-10 16:33:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd'); -sql insert into tb3 values ('2022-07-10 16:33:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e'); +sql insert into tb3 values ('2022-07-10 16:33:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd'); +sql insert into tb3 values ('2022-07-10 16:33:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e'); sql select * from tb1 where fts in ('2022-07-10 16:31:01', '2022-07-10 16:31:03', 1657441865000); if $rows != 3 then diff --git a/tests/script/tsim/scalar/scalar.sim b/tests/script/tsim/scalar/scalar.sim index 29cc67ec248b16018b508a3714290c53593d14f1..900f7c0904ce9db00133e694782bf11c1e6bf45a 100644 --- a/tests/script/tsim/scalar/scalar.sim +++ b/tests/script/tsim/scalar/scalar.sim @@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/exec.sh -n dnode1 -s start sql connect -print ======== step1 +print ======== step1 sql drop database if exists db1; sql create database db1 vgroups 3; sql use db1; @@ -11,24 +11,24 @@ sql create stable st1 (fts timestamp, fbool bool, ftiny tinyint, fsmall smallint sql create table tb1 using st1 tags('2022-07-10 16:31:00', true, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a'); sql create table tb2 using st1 tags('2022-07-10 16:32:00', false, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); sql create table tb3 using st1 tags('2022-07-10 16:33:00', true, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c'); - + sql insert into tb1 values ('2022-07-10 16:31:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a'); sql insert into tb1 values ('2022-07-10 16:31:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); sql insert into tb1 values ('2022-07-10 16:31:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c'); sql insert into tb1 values ('2022-07-10 16:31:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd'); sql insert into tb1 values ('2022-07-10 16:31:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e'); - + sql insert into tb2 values ('2022-07-10 16:32:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a'); -sql insert into tb2 values ('2022-07-10 16:32:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); +sql insert into tb2 values ('2022-07-10 16:32:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); sql insert into tb2 values ('2022-07-10 16:32:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c'); -sql insert into tb2 values ('2022-07-10 16:32:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd'); +sql insert into tb2 values ('2022-07-10 16:32:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd'); sql insert into tb2 values ('2022-07-10 16:32:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e'); - + sql insert into tb3 values ('2022-07-10 16:33:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a'); -sql insert into tb3 values ('2022-07-10 16:33:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); +sql insert into tb3 values ('2022-07-10 16:33:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b'); sql insert into tb3 values ('2022-07-10 16:33:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c'); -sql insert into tb3 values ('2022-07-10 16:33:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd'); -sql insert into tb3 values ('2022-07-10 16:33:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e'); +sql insert into tb3 values ('2022-07-10 16:33:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd'); +sql insert into tb3 values ('2022-07-10 16:33:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e'); sql select 1+1n; if $rows != 1 then diff --git a/tests/script/tsim/sma/rsmaPersistenceRecovery.sim b/tests/script/tsim/sma/rsmaPersistenceRecovery.sim index f53cd45d484cccf0b7ab564c80abb700e3ef9f19..faff48b61c1216e744d349a301f39fabd6dc578a 100644 --- a/tests/script/tsim/sma/rsmaPersistenceRecovery.sim +++ b/tests/script/tsim/sma/rsmaPersistenceRecovery.sim @@ -35,6 +35,7 @@ sleep 7000 print =============== select * from retention level 2 from memory sql select * from ct1; print $data00 $data01 $data02 +print $data10 $data11 $data12 if $rows > 2 then print retention level 2 file rows $rows > 2 return -1 @@ -51,6 +52,7 @@ endi print =============== select * from retention level 1 from memory sql select * from ct1 where ts > now-8d; print $data00 $data01 $data02 +print $data10 $data11 $data12 if $rows > 2 then print retention level 1 file rows $rows > 2 return -1 @@ -89,6 +91,7 @@ system sh/exec.sh -n dnode1 -s start print =============== select * from retention level 2 from file sql select * from ct1; print $data00 $data01 $data02 +print $data10 $data11 $data12 if $rows > 2 then print retention level 2 file rows $rows > 2 return -1 @@ -104,6 +107,7 @@ endi print =============== select * from retention level 1 from file sql select * from ct1 where ts > now-8d; print $data00 $data01 $data02 +print $data10 $data11 $data12 if $rows > 2 then print retention level 1 file rows $rows > 2 return -1 @@ -141,6 +145,7 @@ sleep 7000 print =============== select * from retention level 2 from file and memory after rsma qtaskinfo recovery sql select * from ct1; print $data00 $data01 $data02 +print $data10 $data11 $data12 if $rows > 2 then print retention level 2 file/mem rows $rows > 2 return -1 @@ -163,6 +168,7 @@ endi print =============== select * from retention level 1 from file and memory after rsma qtaskinfo recovery sql select * from ct1 where ts > now-8d; print $data00 $data01 $data02 +print $data10 $data11 $data12 if $rows > 2 then print retention level 1 file/mem rows $rows > 2 return -1 diff --git a/tests/script/tsim/stream/basic0.sim b/tests/script/tsim/stream/basic0.sim index 9a5fb8012f082ca9a4d10f0ef8f6c2c710d0ac54..6d05f69dcfc542de9b4ebb195479263db4d76881 100644 --- a/tests/script/tsim/stream/basic0.sim +++ b/tests/script/tsim/stream/basic0.sim @@ -1,7 +1,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/exec.sh -n dnode1 -s start -sleep 50 +system sh/cfg.sh -n dnode1 -c debugflag -v 131 +system sh/exec.sh -n dnode1 -s start -v sql connect print =============== create database @@ -137,4 +137,17 @@ if $data13 != 789 then return -1 endi +_OVER: system sh/exec.sh -n dnode1 -s stop -x SIGINT +print =============== check +$null= + +system_content sh/checkValgrind.sh -n dnode1 +print cmd return result ----> [ $system_content ] +if $system_content > 0 then + return -1 +endi + +if $system_content == $null then + return -1 +endi diff --git a/tests/script/tsim/stream/session0.sim b/tests/script/tsim/stream/session0.sim index fee8c98cce034b16a5bc00beaad6edf929d0dd83..3e0af354d8ff4135f4e689c63505baf32871cfb3 100644 --- a/tests/script/tsim/stream/session0.sim +++ b/tests/script/tsim/stream/session0.sim @@ -83,22 +83,22 @@ if $data11 != 3 then goto loop0 endi -if $data12 != NULL then +if $data12 != 10 then print ======data12=$data12 goto loop0 endi -if $data13 != NULL then +if $data13 != 10 then print ======data13=$data13 goto loop0 endi -if $data14 != NULL then +if $data14 != 1.100000000 then print ======data14=$data14 return -1 endi -if $data15 != NULL then +if $data15 != 0.000000000 then print ======data15=$data15 return -1 endi @@ -141,38 +141,38 @@ if $data01 != 7 then goto loop1 endi -if $data02 != NULL then +if $data02 != 18 then print =====data02=$data02 goto loop1 endi -if $data03 != NULL then +if $data03 != 4 then print =====data03=$data03 goto loop1 endi -if $data04 != NULL then - print ======$data04 +if $data04 != 1.000000000 then + print ======data04=$data04 return -1 endi -if $data05 != NULL then - print ======$data05 +if $data05 != 1.154700538 then + print ======data05=$data05 return -1 endi if $data06 != 4 then - print ======$data06 + print ======data06=$data06 return -1 endi if $data07 != 1.000000000 then - print ======$data07 + print ======data07=$data07 return -1 endi if $data08 != 13 then - print ======$data08 + print ======data08=$data08 return -1 endi diff --git a/tests/script/tsim/stream/state0.sim b/tests/script/tsim/stream/state0.sim index 4fa883b8137d43521155df8682251d9147599277..877a2877b9378d2101217080906a68a27ae9fee7 100644 --- a/tests/script/tsim/stream/state0.sim +++ b/tests/script/tsim/stream/state0.sim @@ -5,15 +5,15 @@ sleep 50 sql connect print =============== create database -sql create database test vgroups 1 -sql select * from information_schema.ins_databases +sql create database test vgroups 1; +sql select * from information_schema.ins_databases; if $rows != 3 then return -1 endi print $data00 $data01 $data02 -sql use test +sql use test; sql create table t1(ts timestamp, a int, b int , c int, d double, id int); sql create stream streams1 trigger at_once into streamt1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(a) c4, min(c) c5, max(id) c from t1 state_window(a); diff --git a/tests/script/tsim/tag/3.sim b/tests/script/tsim/tag/3.sim index ee794d6fc7bdc4f0aaf03ac55b36337033605dbf..1b8a9769805422b6da9669397886e825e8be73ec 100644 --- a/tests/script/tsim/tag/3.sim +++ b/tests/script/tsim/tag/3.sim @@ -24,496 +24,496 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int, tgc $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 0, 0, 0 ) + sql create table $tb using $mt tags( 0, 0, 0 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 1, 1, 1 ) + sql create table $tb using $mt tags( 1, 1, 1 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step3 sql select * from $mt where tgcol1 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 = true -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 <> true -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 = false -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 <> false -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step4 sql select * from $mt where tgcol2 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where tgcol3 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol3 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol3 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol3 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step6 sql select * from $mt where ts > now + 4m and tgcol1 = true -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol1 <> true -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol1 = false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol1 <> false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol1 = false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol1 <> false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol1 <> false -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol1 <> false and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step7 sql select * from $mt where ts > now + 4m and tgcol2 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step8 sql select * from $mt where ts > now + 4m and tgcol3 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step9 sql select * from $mt where ts > now + 4m and tgcol2 = 1 and tgcol1 = true -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 1 and tgcol1 <> true -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = 0 and tgcol1 = false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> 0 and tgcol1 <> false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = 0 and tgcol1 = false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> 0 and tgcol1 <> false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> 0 and tgcol1 <> false -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 0 and ts < now + 5m and ts < now + 5m and tgcol1 <> false -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step10 sql select * from $mt where ts > now + 4m and tgcol3 = 1 and tgcol1 = true -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 1 and tgcol1 <> true -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 = 0 and tgcol1 = false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 <> 0 and tgcol1 <> false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 = 0 and tgcol1 = false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 <> 0 and tgcol1 <> false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol3 <> 0 and tgcol1 <> false -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 0 and ts < now + 5m and ts < now + 5m and tgcol1 <> false -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step11 sql select * from $mt where ts > now + 4m and tgcol3 = 1 and tgcol2 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 1 and tgcol2 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 = 0 and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 <> 0 and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 = 0 and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 <> 0 and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol3 <> 0 and tgcol2 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step12 sql select * from $mt where ts > now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol1 <> 1 and tgcol2 <> 1 and tgcol3 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol1 = 0 and tgcol2 = 0 and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol1 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol1 = 0 and tgcol2 = 0 and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol1 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol1 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol1 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step13 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step14 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = true -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = true and tgcol2 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = true and tgcol2 = 1 +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = true and tgcol2 = 1 and tgcol3 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step15 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = true -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = true and tgcol2 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = true and tgcol2 = 1 +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = true and tgcol2 = 1 and tgcol3 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step16 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol3 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step17 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = true group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = true and tgcol2 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = true and tgcol2 = 1 and tgcol3 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step18 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = true group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = true and tgcol2 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = true and tgcol2 = 1 and tgcol3 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step19 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = true and tgcol2 = 1 and tgcol3 = 1 partition by tgcol1 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = true and tgcol2 = 1 and tgcol3 = 1 partition by tgcol2 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = true and tgcol2 = 1 and tgcol3 = 1 partition by tgcol3 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/4.sim b/tests/script/tsim/tag/4.sim index 7ad253bf14ac92bd6bb301f3f33dab87da013eb2..9ffe9703c6ce8e5bba4b7dd5918237d7c2d106ef 100644 --- a/tests/script/tsim/tag/4.sim +++ b/tests/script/tsim/tag/4.sim @@ -24,686 +24,686 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 bigi $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 0, 0, 0, 0 ) + sql create table $tb using $mt tags( 0, 0, 0, 0 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 1, 1, 1, 1 ) + sql create table $tb using $mt tags( 1, 1, 1, 1 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step3 sql select * from $mt where tgcol1 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step4 sql select * from $mt where tgcol2 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where tgcol3 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol3 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol3 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol3 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step6 sql select * from $mt where tgcol4 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol4 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol4 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol4 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step7 sql select * from $mt where ts > now + 4m and tgcol1 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol1 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol1 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step8 sql select * from $mt where ts > now + 4m and tgcol2 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step9 sql select * from $mt where ts > now + 4m and tgcol3 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step10 sql select * from $mt where ts > now + 4m and tgcol4 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol4 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step11 sql select * from $mt where ts > now + 4m and tgcol2 = 1 and tgcol1 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 1 and tgcol1 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = 0 and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> 0 and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = 0 and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> 0 and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> 0 and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 0 and ts < now + 5m and ts < now + 5m and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step12 sql select * from $mt where ts > now + 4m and tgcol3 = 1 and tgcol1 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 1 and tgcol1 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 = 0 and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 <> 0 and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 = 0 and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 <> 0 and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol3 <> 0 and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 0 and ts < now + 5m and ts < now + 5m and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step13 sql select * from $mt where ts > now + 4m and tgcol3 = 1 and tgcol2 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 1 and tgcol2 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 = 0 and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 <> 0 and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 = 0 and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 <> 0 and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol3 <> 0 and tgcol2 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step14 sql select * from $mt where ts > now + 4m and tgcol3 = 1 and tgcol4 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 1 and tgcol4 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 = 0 and tgcol4 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 <> 0 and tgcol4 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 = 0 and tgcol4 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 <> 0 and tgcol4 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol3 <> 0 and tgcol4 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 0 and ts < now + 5m and ts < now + 5m and tgcol4 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step15 sql select * from $mt where ts > now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol1 <> 1 and tgcol2 <> 1 and tgcol3 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol1 = 0 and tgcol2 = 0 and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol1 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol1 = 0 and tgcol2 = 0 and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol1 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol1 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol1 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step16 sql select * from $mt where ts > now + 4m and tgcol4 = 1 and tgcol2 = 1 and tgcol3 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 1 and tgcol2 <> 1 and tgcol3 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step17 sql select * from $mt where ts > now + 4m and tgcol4 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol1 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 1 and tgcol2 <> 1 and tgcol3 <> 1 and tgcol1 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step18 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step19 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step20 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step21 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol3 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol4 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step22 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step23 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step24 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 partition by tgcol1 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 partition by tgcol2 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 partition by tgcol2 interval(1d) +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 partition by tgcol3 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 partition by tgcol4 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/5.sim b/tests/script/tsim/tag/5.sim index eaf613e9d1c50961e1fb5419356fcfe4e6c93a7e..e1ac606dfe529a46ba48c5a1abe4bce0136626a6 100644 --- a/tests/script/tsim/tag/5.sim +++ b/tests/script/tsim/tag/5.sim @@ -24,809 +24,809 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 tinyint, tgcol2 int, $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 0, 0, 0, 0, '0' ) + sql create table $tb using $mt tags( 0, 0, 0, 0, '0' ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 1, 1, 1, 1, '1' ) + sql create table $tb using $mt tags( 1, 1, 1, 1, '1' ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step3 sql select * from $mt where tgcol1 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step4 sql select * from $mt where tgcol2 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where tgcol3 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol3 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol3 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol3 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step6 sql select * from $mt where tgcol4 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol4 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol4 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol4 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step7 sql select * from $mt where tgcol5 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol5 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol5 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol5 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step8 sql select * from $mt where ts > now + 4m and tgcol1 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol1 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol1 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step9 sql select * from $mt where ts > now + 4m and tgcol2 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step10 sql select * from $mt where ts > now + 4m and tgcol3 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step11 sql select * from $mt where ts > now + 4m and tgcol4 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol4 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step12 sql select * from $mt where ts > now + 4m and tgcol5 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol5 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol5 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol5 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol5 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol5 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol5 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol5 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step13 sql select * from $mt where ts > now + 4m and tgcol2 = 1 and tgcol1 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 1 and tgcol1 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = 0 and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> 0 and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = 0 and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> 0 and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> 0 and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 0 and ts < now + 5m and ts < now + 5m and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step14 sql select * from $mt where ts > now + 4m and tgcol3 = 1 and tgcol2 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 1 and tgcol2 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 = 0 and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 <> 0 and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 = 0 and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 <> 0 and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol3 <> 0 and tgcol2 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step15 sql select * from $mt where ts > now + 4m and tgcol3 = 1 and tgcol4 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 1 and tgcol4 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 = 0 and tgcol4 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 <> 0 and tgcol4 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 = 0 and tgcol4 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 <> 0 and tgcol4 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol3 <> 0 and tgcol4 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 0 and ts < now + 5m and ts < now + 5m and tgcol4 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step16 sql select * from $mt where ts > now + 4m and tgcol5 = 1 and tgcol4 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol5 <> 1 and tgcol4 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol5 = 0 and tgcol4 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol5 <> 0 and tgcol4 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol5 = 0 and tgcol4 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol5 <> 0 and tgcol4 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol5 <> 0 and tgcol4 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol5 <> 0 and ts < now + 5m and ts < now + 5m and tgcol4 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step17 sql select * from $mt where ts > now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol1 <> 1 and tgcol2 <> 1 and tgcol3 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol1 = 0 and tgcol2 = 0 and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol1 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol1 = 0 and tgcol2 = 0 and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol1 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol1 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol1 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step18 sql select * from $mt where ts > now + 4m and tgcol4 = 1 and tgcol2 = 1 and tgcol3 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 1 and tgcol2 <> 1 and tgcol3 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step19 sql select * from $mt where ts > now + 4m and tgcol4 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol1 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 1 and tgcol2 <> 1 and tgcol3 <> 1 and tgcol1 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step20 sql select * from $mt where ts > now + 4m and tgcol4 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol1 = 1 and tgcol5 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 1 and tgcol2 <> 1 and tgcol3 <> 1 and tgcol1 <> 1 and tgcol5 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 and tgcol1 = 0 and tgcol5 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 and tgcol5 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 and tgcol1 = 0 and tgcol5 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 and tgcol5 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 and tgcol5 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 and tgcol5 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step21 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step22 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 and tgcol5 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step23 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 and tgcol5 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step24 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol3 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol4 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol5 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step25 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 and tgcol5 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step26 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 and tgcol5 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step27 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 partition by tgcol1 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 partition by tgcol2 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 partition by tgcol3 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 partition by tgcol4 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 and tgcol5 = 1 partition by tgcol5 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/6.sim b/tests/script/tsim/tag/6.sim index 31aa5b1747347b05ba13df06488cd8365c28a599..655129255d2c6cec7e6543e8e3f728c294d3f6b4 100644 --- a/tests/script/tsim/tag/6.sim +++ b/tests/script/tsim/tag/6.sim @@ -24,964 +24,964 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 bi $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( '0', 0, 0, 0, '0', '0' ) + sql create table $tb using $mt tags( '0', 0, 0, 0, '0', '0' ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( '1', 1, 1, 1, '1', '1' ) + sql create table $tb using $mt tags( '1', 1, 1, 1, '1', '1' ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step3 sql select * from $mt where tgcol1 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol1 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step4 sql select * from $mt where tgcol2 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where tgcol3 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol3 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol3 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol3 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step6 sql select * from $mt where tgcol4 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol4 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol4 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol4 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step7 sql select * from $mt where tgcol5 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol5 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol5 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol5 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step8 sql select * from $mt where tgcol6 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol6 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol6 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol6 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step9 sql select * from $mt where ts > now + 4m and tgcol1 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol1 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol1 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step10 sql select * from $mt where ts > now + 4m and tgcol2 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step11 sql select * from $mt where ts > now + 4m and tgcol3 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step12 sql select * from $mt where ts > now + 4m and tgcol4 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol4 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step13 sql select * from $mt where ts > now + 4m and tgcol5 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol5 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol5 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol5 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol5 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol5 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol5 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol5 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step14 sql select * from $mt where ts > now + 4m and tgcol6 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol6 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol6 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol6 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol6 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol6 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol6 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol6 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step15 sql select * from $mt where ts > now + 4m and tgcol2 = 1 and tgcol1 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 1 and tgcol1 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = 0 and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> 0 and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = 0 and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> 0 and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> 0 and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 0 and ts < now + 5m and ts < now + 5m and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step16 sql select * from $mt where ts > now + 4m and tgcol3 = 1 and tgcol2 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 1 and tgcol2 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 = 0 and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 <> 0 and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 = 0 and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 <> 0 and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol3 <> 0 and tgcol2 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step17 sql select * from $mt where ts > now + 4m and tgcol3 = 1 and tgcol4 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 1 and tgcol4 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 = 0 and tgcol4 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol3 <> 0 and tgcol4 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 = 0 and tgcol4 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol3 <> 0 and tgcol4 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol3 <> 0 and tgcol4 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol3 <> 0 and ts < now + 5m and ts < now + 5m and tgcol4 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step18 sql select * from $mt where ts > now + 4m and tgcol5 = 1 and tgcol4 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol5 <> 1 and tgcol4 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol5 = 0 and tgcol4 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol5 <> 0 and tgcol4 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol5 = 0 and tgcol4 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol5 <> 0 and tgcol4 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol5 <> 0 and tgcol4 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol5 <> 0 and ts < now + 5m and ts < now + 5m and tgcol4 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step19 sql select * from $mt where ts > now + 4m and tgcol5 = 1 and tgcol6 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol5 <> 1 and tgcol6 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol5 = 0 and tgcol6 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol5 <> 0 and tgcol6 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol5 = 0 and tgcol6 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol5 <> 0 and tgcol6 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol5 <> 0 and tgcol6 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol5 <> 0 and ts < now + 5m and ts < now + 5m and tgcol6 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step20 sql select * from $mt where ts > now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol1 <> 1 and tgcol2 <> 1 and tgcol3 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol1 = 0 and tgcol2 = 0 and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol1 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol1 = 0 and tgcol2 = 0 and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol1 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol1 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol1 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step21 sql select * from $mt where ts > now + 4m and tgcol4 = 1 and tgcol2 = 1 and tgcol3 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 1 and tgcol2 <> 1 and tgcol3 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 and tgcol3 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step22 sql select * from $mt where ts > now + 4m and tgcol4 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol1 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 1 and tgcol2 <> 1 and tgcol3 <> 1 and tgcol1 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 and tgcol1 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step23 sql select * from $mt where ts > now + 4m and tgcol4 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol1 = 1 and tgcol5 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 1 and tgcol2 <> 1 and tgcol3 <> 1 and tgcol1 <> 1 and tgcol5 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 and tgcol1 = 0 and tgcol5 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 and tgcol5 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 and tgcol1 = 0 and tgcol5 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 and tgcol5 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 and tgcol5 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 and tgcol5 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step24 sql select * from $mt where ts > now + 4m and tgcol4 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol1 = 1 and tgcol5 = 1 and tgcol6 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 1 and tgcol2 <> 1 and tgcol3 <> 1 and tgcol1 <> 1 and tgcol5 <> 1 and tgcol6 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 and tgcol1 = 0 and tgcol5 = 0 and tgcol6 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 and tgcol5 <> 0 and tgcol6 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 = 0 and tgcol2 = 0 and tgcol3 = 0 and tgcol1 = 0 and tgcol5 = 0 and tgcol6 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 and tgcol5 <> 0 and tgcol6 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol4 <> 0 and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 and tgcol5 <> 0 and tgcol6 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol4 <> 0 and ts < now + 5m and ts < now + 5m and tgcol2 <> 0 and tgcol3 <> 0 and tgcol1 <> 0 and tgcol5 <> 0 and tgcol6 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step25 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step26 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 and tgcol5 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 and tgcol5 = 1 and tgcol6 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step27 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 and tgcol5 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 and tgcol5 = 1 and tgcol6 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step28 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol3 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol4 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol5 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol6 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step29 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 and tgcol5 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 and tgcol5 = 1 and tgcol6 = 1 group by tgcol1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step30 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 and tgcol5 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 and tgcol5 = 1 and tgcol6 = 1 group by tgcol2 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step31 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 partition by tgcol1 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 partition by tgcol2 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 partition by tgcol3 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 partition by tgcol4 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 and tgcol5 = 1 partition by tgcol5 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol1 = 1 and tgcol2 = 1 and tgcol3 = 1 and tgcol4 = 1 and tgcol5 = 1 and tgcol6 = 1 partition by tgcol6 interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/add.sim b/tests/script/tsim/tag/add.sim index 78244d74c3f8664c7cb66ed8e484663a30696481..2b528c02552150b8a5dbe5a5115de3fe96144eb5 100644 --- a/tests/script/tsim/tag/add.sim +++ b/tests/script/tsim/tag/add.sim @@ -25,39 +25,39 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi sql alter table $mt drop tag tgcol2 sql alter table $mt add tag tgcol4 int sql reset query cache -sql alter table $tb set tag tgcol4 =4 +sql alter table $tb set tag tgcol4 =4 sql reset query cache sql select * from $mt where tgcol4 = 4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi @@ -71,61 +71,61 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 tinyint) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi sql alter table $mt drop tag tgcol2 sql alter table $mt add tag tgcol4 tinyint sql reset query cache -sql alter table $tb set tag tgcol4=4 +sql alter table $tb set tag tgcol4=4 sql reset query cache sql select * from $mt where tgcol4 = 4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi sql select * from $mt where tgcol2 = 1 -x step3 return -1 step3: - + print =============== step4 $i = 4 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bigint, tgcol2 float) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi if $data03 != 2.00000 then @@ -134,34 +134,34 @@ endi sql describe $tb print sql describe $tb -if $data21 != BIGINT then +if $data21 != BIGINT then return -1 endi -if $data31 != FLOAT then +if $data31 != FLOAT then return -1 endi -if $data23 != TAG then +if $data23 != TAG then return -1 endi -if $data33 != TAG then +if $data33 != TAG then return -1 endi sql alter table $mt drop tag tgcol2 sql alter table $mt add tag tgcol4 float sql reset query cache -sql alter table $tb set tag tgcol4=4 +sql alter table $tb set tag tgcol4=4 sql reset query cache sql select * from $mt where tgcol4 = 4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi if $data03 != 4.00000 then @@ -171,184 +171,184 @@ endi sql select * from $mt where tgcol2 = 1 -x step4 return -1 step4: - + print =============== step5 $i = 5 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10)) sql create table $tb using $mt tags( 1, '2' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = '2' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1.000000000 then +if $data02 != 1.000000000 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi sql alter table $mt drop tag tgcol2 sql alter table $mt add tag tgcol4 smallint sql reset query cache -sql alter table $tb set tag tgcol4=4 +sql alter table $tb set tag tgcol4=4 sql reset query cache sql select * from $mt where tgcol4 = 4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1.000000000 then +if $data02 != 1.000000000 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi sql select * from $mt where tgcol3 = '1' -x step5 return -1 step5: - + print =============== step6 $i = 6 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int, tgcol3 tinyint) sql create table $tb using $mt tags( 1, 2, 3 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi sql alter table $mt rename tag tgcol1 tgcol4 -sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol3 sql alter table $mt add tag tgcol5 binary(10) sql alter table $mt add tag tgcol6 binary(10) sql reset query cache -sql alter table $tb set tag tgcol4=false -sql alter table $tb set tag tgcol5='5' -sql alter table $tb set tag tgcol6='6' +sql alter table $tb set tag tgcol4=false +sql alter table $tb set tag tgcol5='5' +sql alter table $tb set tag tgcol6='6' sql reset query cache sql select * from $mt where tgcol5 = '5' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 0 then return -1 endi -if $data03 != 5 then +if $data03 != 5 then return -1 endi -if $data04 != 6 then +if $data04 != 6 then return -1 endi sql select * from $mt where tgcol6 = '6' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 0 then return -1 endi -if $data03 != 5 then +if $data03 != 5 then return -1 endi -if $data04 != 6 then +if $data04 != 6 then return -1 endi sql select * from $mt where tgcol4 = 1 -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where tgcol3 = 1 -x step52 return -1 step52: - + print =============== step7 $i = 7 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 tinyint, tgcol3 binary(10)) sql create table $tb using $mt tags( 1, 2, '3' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol3 = '3' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi sql alter table $mt rename tag tgcol1 tgcol4 -sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol3 sql alter table $mt add tag tgcol5 bigint sql alter table $mt add tag tgcol6 tinyint sql reset query cache -sql alter table $tb set tag tgcol4=4 -sql alter table $tb set tag tgcol5=5 -sql alter table $tb set tag tgcol6=6 +sql alter table $tb set tag tgcol4=4 +sql alter table $tb set tag tgcol5=5 +sql alter table $tb set tag tgcol6=6 sql reset query cache sql select * from $mt where tgcol6 = 6 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 4 then +if $data02 != 4 then return -1 endi -if $data03 != 5 then +if $data03 != 5 then return -1 endi -if $data04 != 6 then +if $data04 != 6 then return -1 endi @@ -358,55 +358,55 @@ step71: sql select * from $mt where tgcol3 = 1 -x step72 return -1 step72: - + print =============== step8 $i = 8 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bigint, tgcol2 float, tgcol3 binary(10)) sql create table $tb using $mt tags( 1, 2, '3' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol3 = '3' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi if $data03 != 2.00000 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi sql alter table $mt rename tag tgcol1 tgcol4 -sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol3 sql alter table $mt add tag tgcol5 binary(17) sql alter table $mt add tag tgcol6 bool sql reset query cache -sql alter table $tb set tag tgcol4=4 -sql alter table $tb set tag tgcol5='5' +sql alter table $tb set tag tgcol4=4 +sql alter table $tb set tag tgcol5='5' sql alter table $tb set tag tgcol6='1' sql reset query cache sql select * from $mt where tgcol5 = '5' print select * from $mt where tgcol5 = 5 print $data01 $data02 $data03 $data04 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 4 then +if $data02 != 4 then return -1 endi -if $data03 != 5 then +if $data03 != 5 then return -1 endi if $data04 != 0 then @@ -426,45 +426,45 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10), tgcol3 binary(10)) sql create table $tb using $mt tags( 1, '2', '3' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = '2' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1.000000000 then +if $data02 != 1.000000000 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi sql alter table $mt rename tag tgcol1 tgcol4 -sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol3 sql alter table $mt add tag tgcol5 bool sql alter table $mt add tag tgcol6 float sql reset query cache -sql alter table $tb set tag tgcol4=4 +sql alter table $tb set tag tgcol4=4 sql alter table $tb set tag tgcol5=1 sql alter table $tb set tag tgcol6=6 sql reset query cache sql select * from $mt where tgcol5 = 1 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 4.000000000 then +if $data02 != 4.000000000 then return -1 endi if $data03 != 1 then @@ -487,24 +487,24 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 binary(10), tgcol3 binary(10), tgcol4 binary(10)) sql create table $tb using $mt tags( '1', '2', '3', '4' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol4 = '4' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi -if $data05 != 4 then +if $data05 != 4 then return -1 endi @@ -512,7 +512,7 @@ sql alter table $mt rename tag tgcol1 tgcol4 -x step103 return -1 step103: -sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol3 sql alter table $mt drop tag tgcol4 sql reset query cache @@ -520,28 +520,28 @@ sql alter table $mt add tag tgcol4 binary(10) sql alter table $mt add tag tgcol5 bool sql reset query cache -sql alter table $tb set tag tgcol4='4' +sql alter table $tb set tag tgcol4='4' sql alter table $tb set tag tgcol5=false sql reset query cache sql select * from $mt where tgcol4 = '4' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi if $data04 != 0 then return -1 endi -if $data05 != null then +if $data05 != null then return -1 endi @@ -558,27 +558,27 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int, tgcol3 smallint, tgcol4 float, tgcol5 binary(10)) sql create table $tb using $mt tags( 1, 2, 3, 4, '5' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi if $data05 != 4.00000 then return -1 endi -if $data06 != 5 then +if $data06 != 5 then return -1 endi @@ -586,7 +586,7 @@ sql alter table $mt rename tag tgcol1 tgcol4 -x step114 return -1 step114: -sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol3 sql alter table $mt drop tag tgcol4 sql alter table $mt drop tag tgcol5 @@ -598,7 +598,7 @@ sql alter table $mt add tag tgcol7 bigint sql alter table $mt add tag tgcol8 smallint sql reset query cache -sql alter table $tb set tag tgcol4='4' +sql alter table $tb set tag tgcol4='4' sql alter table $tb set tag tgcol5=5 sql alter table $tb set tag tgcol6='6' sql alter table $tb set tag tgcol7=7 @@ -607,28 +607,28 @@ sql reset query cache sql select * from $mt where tgcol5 =5 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi -if $data04 != 5 then +if $data04 != 5 then return -1 endi -if $data05 != 6 then +if $data05 != 6 then return -1 endi -if $data06 != 7 then +if $data06 != 7 then return -1 endi -if $data07 != 8 then +if $data07 != 8 then return -1 endi @@ -648,34 +648,34 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 smallint, tgcol3 float, tgcol4 double, tgcol5 binary(10), tgcol6 binary(20)) sql create table $tb using $mt tags( 1, 2, 3, 4, '5', '6' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi if $data04 != 3.00000 then return -1 endi -if $data05 != 4.000000000 then +if $data05 != 4.000000000 then return -1 endi -if $data06 != 5 then +if $data06 != 5 then return -1 endi -if $data07 != 6 then +if $data07 != 6 then return -1 endi -sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol3 sql alter table $mt drop tag tgcol4 sql alter table $mt drop tag tgcol5 @@ -686,8 +686,8 @@ sql alter table $mt add tag tgcol4 binary(10) sql alter table $mt add tag tgcol5 bigint sql reset query cache -sql alter table $tb set tag tgcol1=false -sql alter table $tb set tag tgcol2='5' +sql alter table $tb set tag tgcol1=false +sql alter table $tb set tag tgcol2='5' sql alter table $tb set tag tgcol3=4 sql alter table $tb set tag tgcol4='3' sql alter table $tb set tag tgcol5=2 @@ -696,28 +696,28 @@ sql reset query cache sql select * from $mt where tgcol4 = '3' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 0 then return -1 endi -if $data03 != 1 then +if $data03 != 1 then return -1 endi -if $data04 != 5 then +if $data04 != 5 then return -1 endi -if $data05 != 4 then +if $data05 != 4 then return -1 endi -if $data06 != 3 then +if $data06 != 3 then return -1 endi -if $data07 != 2 then +if $data07 != 2 then return -1 endi @@ -747,34 +747,34 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 int, tgcol3 smallint, tgcol4 binary(11), tgcol5 double, tgcol6 binary(20)) sql create table $tb using $mt tags( '1', 2, 3, '4', 5, '6' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = '1' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi -if $data05 != 4 then +if $data05 != 4 then return -1 endi -if $data06 != 5.000000000 then +if $data06 != 5.000000000 then return -1 endi -if $data07 != 6 then +if $data07 != 6 then return -1 endi -sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol4 sql alter table $mt drop tag tgcol6 sql reset query cache @@ -783,8 +783,8 @@ sql alter table $mt add tag tgcol4 int sql alter table $mt add tag tgcol6 bigint sql reset query cache -sql alter table $tb set tag tgcol1='7' -sql alter table $tb set tag tgcol2='8' +sql alter table $tb set tag tgcol1='7' +sql alter table $tb set tag tgcol2='8' sql alter table $tb set tag tgcol3=9 sql alter table $tb set tag tgcol4=10 sql alter table $tb set tag tgcol5=11 @@ -793,28 +793,28 @@ sql reset query cache sql select * from $mt where tgcol2 = '8' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 7 then +if $data02 != 7 then return -1 endi -if $data03 != 9 then +if $data03 != 9 then return -1 endi -if $data04 != 11.000000000 then +if $data04 != 11.000000000 then return -1 endi -if $data05 != 8 then +if $data05 != 8 then return -1 endi -if $data06 != 10 then +if $data06 != 10 then return -1 endi -if $data07 != 12 then +if $data07 != 12 then return -1 endi @@ -824,7 +824,7 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 bigint) sql create table $tb using $mt tags( 1, 1 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql alter table $mt add tag tgcol3 binary(10) sql alter table $mt add tag tgcol4 int @@ -834,13 +834,13 @@ sql alter table $mt add tag tgcol6 bigint sql reset query cache sql alter table $mt drop tag tgcol6 sql alter table $mt add tag tgcol7 bigint -sql alter table $mt add tag tgcol8 bigint +sql alter table $mt add tag tgcol8 bigint print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/bigint.sim b/tests/script/tsim/tag/bigint.sim index dc5a03152bdceb17c21ea5ed8652e6acaa09a646..26a5addf6a3f9d5e717b21b5003ac719d8b4d04d 100644 --- a/tests/script/tsim/tag/bigint.sim +++ b/tests/script/tsim/tag/bigint.sim @@ -24,50 +24,50 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bigint) $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 0 ) + sql create table $tb using $mt tags( 0 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 1 ) + sql create table $tb using $mt tags( 1 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $tb -if $rows != $rowNum then +if $rows != $rowNum then return -1 endi sql select * from $tb where ts < now + 4m -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $tb where ts <= now + 4m -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $tb where ts > now + 4m -if $rows != 15 then +if $rows != 15 then return -1 endi sql select * from $tb where ts >= now + 4m -if $rows != 15 then +if $rows != 15 then return -1 endi sql select * from $tb where ts > now + 4m and ts < now + 5m -if $rows != 1 then +if $rows != 1 then return -1 endi sql select * from $tb where ts < now + 4m and ts > now + 5m @@ -83,155 +83,155 @@ if $rows != 0 then return -1 endi sql select * from $tb where ts > now + 4m and ts > now + 5m and ts < now + 6m -if $rows != 1 then +if $rows != 1 then return -1 endi print =============== step3 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step4 sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where ts > now + 4m and tgcol = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step6 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step7 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step8 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi print =============== step9 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step10 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step11 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step12 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt partition by tgcol interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/binary.sim b/tests/script/tsim/tag/binary.sim index b3f243b8c014c419e05634e2db2845295fc527d4..e0c02b4823ca43f8ee783628776079ca1f57a3fa 100644 --- a/tests/script/tsim/tag/binary.sim +++ b/tests/script/tsim/tag/binary.sim @@ -24,50 +24,50 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol binary(10)) $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( '0' ) + sql create table $tb using $mt tags( '0' ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( '1' ) + sql create table $tb using $mt tags( '1' ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $tb -if $rows != $rowNum then +if $rows != $rowNum then return -1 endi sql select * from $tb where ts < now + 4m -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $tb where ts <= now + 4m -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $tb where ts > now + 4m -if $rows != 15 then +if $rows != 15 then return -1 endi sql select * from $tb where ts >= now + 4m -if $rows != 15 then +if $rows != 15 then return -1 endi sql select * from $tb where ts > now + 4m and ts < now + 5m -if $rows != 1 then +if $rows != 1 then return -1 endi sql select * from $tb where ts < now + 4m and ts > now + 5m @@ -83,155 +83,155 @@ if $rows != 0 then return -1 endi sql select * from $tb where ts > now + 4m and ts > now + 5m and ts < now + 6m -if $rows != 1 then +if $rows != 1 then return -1 endi print =============== step3 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step4 sql select * from $mt where tgcol = '0' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> '0' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = '1' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> '1' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = '1' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> '1' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = '0' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> '0' -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where ts > now + 4m and tgcol = '1' -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> '1' -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol = '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol <> '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol = '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol <> '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol <> '0' -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> '0' and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step6 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step7 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = '1' -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step8 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi print =============== step9 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step10 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = '1' group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step11 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step12 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt partition by tgcol interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/binary_binary.sim b/tests/script/tsim/tag/binary_binary.sim index ad6c0ca1cbb2101d43a5c08c2fb14e10e5da1bee..b5ba3562acd244116dd79d25a58adebb41f03474 100644 --- a/tests/script/tsim/tag/binary_binary.sim +++ b/tests/script/tsim/tag/binary_binary.sim @@ -24,283 +24,283 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol binary(5), tgcol2 bina $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( '0', '0' ) + sql create table $tb using $mt tags( '0', '0' ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( '1', '1' ) + sql create table $tb using $mt tags( '1', '1' ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step3 sql select * from $mt where tgcol = '0' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> '0' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = '1' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> '1' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = '1' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> '1' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = '0' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> '0' -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step4 sql select * from $mt where tgcol2 = '0' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> '0' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 = '1' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> '1' -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where ts > now + 4m and tgcol = '1' -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> '1' -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol = '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol <> '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol = '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol <> '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol <> '0' -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> '0' and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step6 sql select * from $mt where ts > now + 4m and tgcol2 = '1' -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> '1' -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> '0' -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> '0' and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step7 sql select * from $mt where ts > now + 4m and tgcol2 = '1' and tgcol = '1' -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> '1' and tgcol <> '1' -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = '0' and tgcol = '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> '0' and tgcol <> '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = '0' and tgcol = '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> '0' and tgcol <> '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> '0' and tgcol <> '0' -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> '0' and ts < now + 5m and ts < now + 5m and tgcol <> '0' -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step8 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step9 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = '1' -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol2 = '1' -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = '1' and tgcol2 = '1' -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step10 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi print =============== step11 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step12 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = '1' group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol2 = '1' group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = '1' and tgcol2 = '1' group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step13 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step14 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt partition by tgcol interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/bool.sim b/tests/script/tsim/tag/bool.sim index c0f4c1ccdda2651d8d21add6573a2ff399b2f2ef..1473556841ddcf3ef08d0a11a651fb0571095385 100644 --- a/tests/script/tsim/tag/bool.sim +++ b/tests/script/tsim/tag/bool.sim @@ -24,50 +24,50 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bool) $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 0 ) + sql create table $tb using $mt tags( 0 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 1 ) + sql create table $tb using $mt tags( 1 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $tb -if $rows != $rowNum then +if $rows != $rowNum then return -1 endi sql select * from $tb where ts < now + 4m -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $tb where ts <= now + 4m -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $tb where ts > now + 4m -if $rows != 15 then +if $rows != 15 then return -1 endi sql select * from $tb where ts >= now + 4m -if $rows != 15 then +if $rows != 15 then return -1 endi sql select * from $tb where ts > now + 4m and ts < now + 5m -if $rows != 1 then +if $rows != 1 then return -1 endi sql select * from $tb where ts < now + 4m and ts > now + 5m @@ -79,40 +79,40 @@ if $rows != 0 then return -1 endi sql select * from $tb where ts > now + 4m and ts > now + 5m and ts < now + 6m -if $rows != 1 then +if $rows != 1 then return -1 endi print =============== step3 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step4 sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 @@ -121,115 +121,115 @@ if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = true -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> true -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = false -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> false -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where ts > now + 4m and tgcol = true -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> true -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol = false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol <> false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol = false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol <> false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol <> false -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> false and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step6 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step7 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = true -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step8 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi print =============== step9 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step10 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = true group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step11 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step12 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt partition by tgcol interval(1d) print select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt partition by tgcol interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/bool_binary.sim b/tests/script/tsim/tag/bool_binary.sim index 627aea4495d42e294e4a24b5a55620e68f4c22ce..7fb15ec2cf541a553460014ed7eeabd7a9becede 100644 --- a/tests/script/tsim/tag/bool_binary.sim +++ b/tests/script/tsim/tag/bool_binary.sim @@ -24,283 +24,283 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bool, tgcol2 binary(5) $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 0, '0' ) + sql create table $tb using $mt tags( 0, '0' ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 1, '1' ) + sql create table $tb using $mt tags( 1, '1' ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step3 sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = true -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> true -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = false -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> false -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step4 sql select * from $mt where tgcol2 = '0' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> '0' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 = '1' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> '1' -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where ts > now + 4m and tgcol = true -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> true -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol = false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol <> false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol = false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol <> false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol <> false -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> false and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step6 sql select * from $mt where ts > now + 4m and tgcol2 = '1' -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> '1' -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> '0' -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> '0' and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step7 sql select * from $mt where ts > now + 4m and tgcol2 = '1' and tgcol = true -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> '1' and tgcol <> true -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = '0' and tgcol = false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> '0' and tgcol <> false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = '0' and tgcol = false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> '0' and tgcol <> false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> '0' and tgcol <> false -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> '0' and ts < now + 5m and ts < now + 5m and tgcol <> false -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step8 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step9 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = true -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol2 = '1' -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = true and tgcol2 = '1' -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step10 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi print =============== step11 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step12 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = true group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol2 = '1' group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = true and tgcol2 = '1' group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step13 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step14 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt partition by tgcol interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/bool_int.sim b/tests/script/tsim/tag/bool_int.sim index 1e291573ef5c22a6c477cdb23d01cb4788fe43cf..f18da59d9cd94f650166bea938312a471907f322 100644 --- a/tests/script/tsim/tag/bool_int.sim +++ b/tests/script/tsim/tag/bool_int.sim @@ -24,299 +24,299 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bool, tgcol2 int) $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 0, 0 ) + sql create table $tb using $mt tags( 0, 0 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 1, 1 ) + sql create table $tb using $mt tags( 1, 1 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step3 sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = true -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> true -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = false -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> false -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step4 sql select * from $mt where tgcol2 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 = true -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> true -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 = false -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> false -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where ts > now + 4m and tgcol = true -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> true -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol = false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol <> false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol = false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol <> false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol <> false -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> false and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step6 sql select * from $mt where ts > now + 4m and tgcol2 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step7 sql select * from $mt where ts > now + 4m and tgcol2 = 1 and tgcol = true -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 1 and tgcol <> true -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = 0 and tgcol = false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> 0 and tgcol <> false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = 0 and tgcol = false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> 0 and tgcol <> false -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> 0 and tgcol <> false -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 0 and ts < now + 5m and ts < now + 5m and tgcol <> false -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step8 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step9 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = true -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol2 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = true and tgcol2 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step10 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi print =============== step11 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step12 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = true group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol2 = 1 group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = true and tgcol2 = 1 group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step13 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step14 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt partition by tgcol interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/change.sim b/tests/script/tsim/tag/change.sim index 53f9f493966986f00fa5cf5defb7b9f10f24d2bd..d44877c99a244ffd2c1060950f004c5a967d8406 100644 --- a/tests/script/tsim/tag/change.sim +++ b/tests/script/tsim/tag/change.sim @@ -25,18 +25,18 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi @@ -62,18 +62,18 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 tinyint) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi @@ -86,15 +86,15 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bigint, tgcol2 float) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi if $data03 != 2.00000 then @@ -103,61 +103,61 @@ endi sql alter table $mt rename tag tgcol1 tgcol3 sql alter table $mt rename tag tgcol2 tgcol4 - + print =============== step5 $i = 5 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10)) sql create table $tb using $mt tags( 1, '2' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = '2' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1.000000000 then +if $data02 != 1.000000000 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi sql alter table $mt rename tag tgcol1 tgcol3 sql alter table $mt rename tag tgcol2 tgcol4 - + print =============== step6 $i = 6 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 int, tgcol3 smallint, tgcol4 binary(11), tgcol5 double, tgcol6 binary(20)) sql create table $tb using $mt tags( '1', 2, 3, '4', 5, '6' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = '1' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi -if $data05 != 4 then +if $data05 != 4 then return -1 endi -if $data06 != 5.000000000 then +if $data06 != 5.000000000 then return -1 endi -if $data07 != 6 then +if $data07 != 6 then return -1 endi @@ -187,31 +187,31 @@ step25: sql select * from $mt where tgcol3 = 1 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi sql select * from $mt where tgcol4 = 2 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi @@ -229,34 +229,34 @@ step32: sql select * from $mt where tgcol3 = 1 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi sql select * from $mt where tgcol4 = 2 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi - + print =============== step4 $i = 4 $mt = $mtPrefix . $i @@ -271,13 +271,13 @@ step42: sql select * from $mt where tgcol3 = 1 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi if $data03 != 2.00000 then @@ -298,7 +298,7 @@ endi if $data03 != 2.00000 then return -1 endi - + print =============== step5 $i = 5 $mt = $mtPrefix . $i @@ -313,34 +313,34 @@ step52: sql select * from $mt where tgcol3 < 2 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1.000000000 then +if $data02 != 1.000000000 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi sql select * from $mt where tgcol4 = '2' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1.000000000 then +if $data02 != 1.000000000 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi - + print =============== step6 $i = 6 $mt = $mtPrefix . $i @@ -367,144 +367,144 @@ step66: sql select * from $mt where tgcol7 = '1' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 4 then +if $data04 != 4 then return -1 endi -if $data05 != 5.000000000 then +if $data05 != 5.000000000 then return -1 endi -if $data06 != 6 then +if $data06 != 6 then return -1 endi -if $data07 != null then +if $data07 != null then return -1 endi sql select * from $mt where tgcol8 = 2 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 4 then +if $data04 != 4 then return -1 endi -if $data05 != 5.000000000 then +if $data05 != 5.000000000 then return -1 endi -if $data06 != 6 then +if $data06 != 6 then return -1 endi -if $data07 != null then +if $data07 != null then return -1 endi sql select * from $mt where tgcol9 = '4' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 4 then +if $data04 != 4 then return -1 endi -if $data05 != 5.000000000 then +if $data05 != 5.000000000 then return -1 endi -if $data06 != 6 then +if $data06 != 6 then return -1 endi -if $data07 != null then +if $data07 != null then return -1 endi sql select * from $mt where tgcol10 = 5 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 4 then +if $data04 != 4 then return -1 endi -if $data05 != 5.000000000 then +if $data05 != 5.000000000 then return -1 endi -if $data06 != 6 then +if $data06 != 6 then return -1 endi -if $data07 != null then +if $data07 != null then return -1 endi sql select * from $mt where tgcol11 = '6' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 4 then +if $data04 != 4 then return -1 endi -if $data05 != 5.000000000 then +if $data05 != 5.000000000 then return -1 endi -if $data06 != 6 then +if $data06 != 6 then return -1 endi -if $data07 != null then +if $data07 != null then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/column.sim b/tests/script/tsim/tag/column.sim index cc692900ce625317ead828310117abdc65cce598..041bc0a117856551a40f09d8961a7ca3e0e7d9ec 100644 --- a/tests/script/tsim/tag/column.sim +++ b/tests/script/tsim/tag/column.sim @@ -31,7 +31,7 @@ sql create table $tb using $mt tags( 0, '0' ) $i = 1 $tb = $tbPrefix . $i -sql create table $tb using $mt tags( 1, '1' ) +sql create table $tb using $mt tags( 1, '1' ) $i = 2 $tb = $tbPrefix . $i @@ -66,26 +66,26 @@ sql insert into $tb values(now, '3', '3') print =============== step4 sql select * from $mt where tgcol2 = '1' -if $rows != 1 then +if $rows != 1 then return -1 endi print =============== step5 sql select * from $mt -if $rows != 4 then +if $rows != 4 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/commit.sim b/tests/script/tsim/tag/commit.sim index cc63e1670014e907ca3a0a832b7a34d1630c311f..e4f839e613472ec319980c19ca47e790d668baf1 100644 --- a/tests/script/tsim/tag/commit.sim +++ b/tests/script/tsim/tag/commit.sim @@ -25,39 +25,39 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi sql alter table $mt drop tag tgcol2 sql alter table $mt add tag tgcol4 int sql reset query cache -sql alter table $tb set tag tgcol4 =4 +sql alter table $tb set tag tgcol4 =4 sql reset query cache sql select * from $mt where tgcol4 = 4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi @@ -71,61 +71,61 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 tinyint) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi sql alter table $mt drop tag tgcol2 sql alter table $mt add tag tgcol4 tinyint sql reset query cache -sql alter table $tb set tag tgcol4=4 +sql alter table $tb set tag tgcol4=4 sql reset query cache sql select * from $mt where tgcol4 = 4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi sql select * from $mt where tgcol2 = 1 -x step3 return -1 step3: - + print =============== step4 $i = 4 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bigint, tgcol2 float) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi if $data03 != 2.00000 then @@ -133,34 +133,34 @@ if $data03 != 2.00000 then endi sql describe $tb -if $data21 != BIGINT then +if $data21 != BIGINT then return -1 endi -if $data31 != FLOAT then +if $data31 != FLOAT then return -1 endi -if $data23 != TAG then +if $data23 != TAG then return -1 endi -if $data33 != TAG then +if $data33 != TAG then return -1 endi sql alter table $mt drop tag tgcol2 sql alter table $mt add tag tgcol4 float sql reset query cache -sql alter table $tb set tag tgcol4=4 +sql alter table $tb set tag tgcol4=4 sql reset query cache sql select * from $mt where tgcol4 = 4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi if $data03 != 4.00000 then @@ -170,184 +170,184 @@ endi sql select * from $mt where tgcol2 = 1 -x step4 return -1 step4: - + print =============== step5 $i = 5 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10)) sql create table $tb using $mt tags( 1, '2' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = '2' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1.000000000 then +if $data02 != 1.000000000 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi sql alter table $mt drop tag tgcol2 sql alter table $mt add tag tgcol4 smallint sql reset query cache -sql alter table $tb set tag tgcol4=4 +sql alter table $tb set tag tgcol4=4 sql reset query cache sql select * from $mt where tgcol4 = 4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1.000000000 then +if $data02 != 1.000000000 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi sql select * from $mt where tgcol3 = '1' -x step5 return -1 step5: - + print =============== step6 $i = 6 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int, tgcol3 tinyint) sql create table $tb using $mt tags( 1, 2, 3 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi sql alter table $mt rename tag tgcol1 tgcol4 -sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol3 sql alter table $mt add tag tgcol5 binary(10) sql alter table $mt add tag tgcol6 binary(10) sql reset query cache -sql alter table $tb set tag tgcol4=false -sql alter table $tb set tag tgcol5='5' -sql alter table $tb set tag tgcol6='6' +sql alter table $tb set tag tgcol4=false +sql alter table $tb set tag tgcol5='5' +sql alter table $tb set tag tgcol6='6' sql reset query cache sql select * from $mt where tgcol5 = '5' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 0 then return -1 endi -if $data03 != 5 then +if $data03 != 5 then return -1 endi -if $data04 != 6 then +if $data04 != 6 then return -1 endi sql select * from $mt where tgcol6 = '6' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 0 then return -1 endi -if $data03 != 5 then +if $data03 != 5 then return -1 endi -if $data04 != 6 then +if $data04 != 6 then return -1 endi sql select * from $mt where tgcol4 = 1 -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where tgcol3 = 1 -x step52 return -1 step52: - + print =============== step7 $i = 7 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 tinyint, tgcol3 binary(10)) sql create table $tb using $mt tags( 1, 2, '3' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol3 = '3' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi sql alter table $mt rename tag tgcol1 tgcol4 -sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol3 sql alter table $mt add tag tgcol5 bigint sql alter table $mt add tag tgcol6 tinyint sql reset query cache -sql alter table $tb set tag tgcol4=4 -sql alter table $tb set tag tgcol5=5 -sql alter table $tb set tag tgcol6=6 +sql alter table $tb set tag tgcol4=4 +sql alter table $tb set tag tgcol5=5 +sql alter table $tb set tag tgcol6=6 sql reset query cache sql select * from $mt where tgcol6 = 6 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 4 then +if $data02 != 4 then return -1 endi -if $data03 != 5 then +if $data03 != 5 then return -1 endi -if $data04 != 6 then +if $data04 != 6 then return -1 endi @@ -357,54 +357,54 @@ step71: sql select * from $mt where tgcol3 = 1 -x step72 return -1 step72: - + print =============== step8 $i = 8 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bigint, tgcol2 float, tgcol3 binary(10)) sql create table $tb using $mt tags( 1, 2, '3' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol3 = '3' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi if $data03 != 2.00000 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi sql alter table $mt rename tag tgcol1 tgcol4 -sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol3 sql alter table $mt add tag tgcol5 binary(17) sql alter table $mt add tag tgcol6 bool sql reset query cache -sql alter table $tb set tag tgcol4=4 -sql alter table $tb set tag tgcol5='5' +sql alter table $tb set tag tgcol4=4 +sql alter table $tb set tag tgcol5='5' sql alter table $tb set tag tgcol6=1 sql reset query cache sql select * from $mt where tgcol5 = '5' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 4 then +if $data02 != 4 then return -1 endi -if $data03 != 5 then +if $data03 != 5 then return -1 endi if $data04 != 1 then @@ -424,45 +424,45 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10), tgcol3 binary(10)) sql create table $tb using $mt tags( 1, '2', '3' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = '2' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1.000000000 then +if $data02 != 1.000000000 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi sql alter table $mt rename tag tgcol1 tgcol4 -sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol3 sql alter table $mt add tag tgcol5 bool sql alter table $mt add tag tgcol6 float sql reset query cache -sql alter table $tb set tag tgcol4=4 +sql alter table $tb set tag tgcol4=4 sql alter table $tb set tag tgcol5=1 sql alter table $tb set tag tgcol6=6 sql reset query cache sql select * from $mt where tgcol5 = 1 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 4.000000000 then +if $data02 != 4.000000000 then return -1 endi if $data03 != 1 then @@ -485,24 +485,24 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 binary(10), tgcol3 binary(10), tgcol4 binary(10)) sql create table $tb using $mt tags( '1', '2', '3', '4' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol4 = '4' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi -if $data05 != 4 then +if $data05 != 4 then return -1 endi @@ -510,7 +510,7 @@ sql alter table $mt rename tag tgcol1 tgcol4 -x step103 return -1 step103: -sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol3 sql alter table $mt drop tag tgcol4 sql reset query cache @@ -524,22 +524,22 @@ sql reset query cache sql select * from $mt where tgcol4 = '4' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi if $data04 != 0 then return -1 endi -if $data05 != null then +if $data05 != null then return -1 endi @@ -556,27 +556,27 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int, tgcol3 smallint, tgcol4 float, tgcol5 binary(10)) sql create table $tb using $mt tags( 1, 2, 3, 4, '5' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi if $data05 != 4.00000 then return -1 endi -if $data06 != 5 then +if $data06 != 5 then return -1 endi @@ -584,7 +584,7 @@ sql alter table $mt rename tag tgcol1 tgcol4 -x step114 return -1 step114: -sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol3 sql alter table $mt drop tag tgcol4 sql alter table $mt drop tag tgcol5 @@ -596,7 +596,7 @@ sql alter table $mt add tag tgcol7 bigint sql alter table $mt add tag tgcol8 smallint sql reset query cache -sql alter table $tb set tag tgcol4='4' +sql alter table $tb set tag tgcol4='4' sql alter table $tb set tag tgcol5=5 sql alter table $tb set tag tgcol6='6' sql alter table $tb set tag tgcol7=7 @@ -605,28 +605,28 @@ sql reset query cache sql select * from $mt where tgcol5 =5 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi -if $data04 != 5 then +if $data04 != 5 then return -1 endi -if $data05 != 6 then +if $data05 != 6 then return -1 endi -if $data06 != 7 then +if $data06 != 7 then return -1 endi -if $data07 != 8 then +if $data07 != 8 then return -1 endi @@ -646,34 +646,34 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 smallint, tgcol3 float, tgcol4 double, tgcol5 binary(10), tgcol6 binary(20)) sql create table $tb using $mt tags( 1, 2, 3, 4, '5', '6' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi if $data04 != 3.00000 then return -1 endi -if $data05 != 4.000000000 then +if $data05 != 4.000000000 then return -1 endi -if $data06 != 5 then +if $data06 != 5 then return -1 endi -if $data07 != 6 then +if $data07 != 6 then return -1 endi -sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol3 sql alter table $mt drop tag tgcol4 sql alter table $mt drop tag tgcol5 @@ -684,8 +684,8 @@ sql alter table $mt add tag tgcol4 binary(10) sql alter table $mt add tag tgcol5 bigint sql reset query cache -sql alter table $tb set tag tgcol1=false -sql alter table $tb set tag tgcol2='5' +sql alter table $tb set tag tgcol1=false +sql alter table $tb set tag tgcol2='5' sql alter table $tb set tag tgcol3=4 sql alter table $tb set tag tgcol4='3' sql alter table $tb set tag tgcol5=2 @@ -694,28 +694,28 @@ sql reset query cache sql select * from $mt where tgcol4 = '3' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 0 then return -1 endi -if $data03 != 1 then +if $data03 != 1 then return -1 endi -if $data04 != 5 then +if $data04 != 5 then return -1 endi -if $data05 != 4 then +if $data05 != 4 then return -1 endi -if $data06 != 3 then +if $data06 != 3 then return -1 endi -if $data07 != 2 then +if $data07 != 2 then return -1 endi @@ -745,34 +745,34 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 int, tgcol3 smallint, tgcol4 binary(11), tgcol5 double, tgcol6 binary(20)) sql create table $tb using $mt tags( '1', 2, 3, '4', 5, '6' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = '1' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi -if $data05 != 4 then +if $data05 != 4 then return -1 endi -if $data06 != 5.000000000 then +if $data06 != 5.000000000 then return -1 endi -if $data07 != 6 then +if $data07 != 6 then return -1 endi -sql alter table $mt drop tag tgcol2 +sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol4 sql alter table $mt drop tag tgcol6 sql reset query cache @@ -781,8 +781,8 @@ sql alter table $mt add tag tgcol4 int sql alter table $mt add tag tgcol6 bigint sql reset query cache -sql alter table $tb set tag tgcol1='7' -sql alter table $tb set tag tgcol2='8' +sql alter table $tb set tag tgcol1='7' +sql alter table $tb set tag tgcol2='8' sql alter table $tb set tag tgcol3=9 sql alter table $tb set tag tgcol4=10 sql alter table $tb set tag tgcol5=11 @@ -791,28 +791,28 @@ sql reset query cache sql select * from $mt where tgcol2 = '8' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 7 then +if $data02 != 7 then return -1 endi -if $data03 != 9 then +if $data03 != 9 then return -1 endi -if $data04 != 11.000000000 then +if $data04 != 11.000000000 then return -1 endi -if $data05 != 8 then +if $data05 != 8 then return -1 endi -if $data06 != 10 then +if $data06 != 10 then return -1 endi -if $data07 != 12 then +if $data07 != 12 then return -1 endi @@ -832,16 +832,16 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol4 = 4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi @@ -852,19 +852,19 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol4 = 4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi - + print =============== step4 $i = 4 $mt = $mtPrefix . $i @@ -872,20 +872,20 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol4 = 4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi if $data03 != 4.00000 then return -1 endi - + print =============== step5 $i = 5 $mt = $mtPrefix . $i @@ -893,20 +893,20 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol4 = 4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1.000000000 then +if $data02 != 1.000000000 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi - + print =============== step6 $i = 6 $mt = $mtPrefix . $i @@ -914,45 +914,45 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol5 = '5' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 0 then return -1 endi -if $data03 != 5 then +if $data03 != 5 then return -1 endi -if $data04 != 6 then +if $data04 != 6 then return -1 endi sql select * from $mt where tgcol6 = '6' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 0 then return -1 endi -if $data03 != 5 then +if $data03 != 5 then return -1 endi -if $data04 != 6 then +if $data04 != 6 then return -1 endi sql select * from $mt where tgcol4 = 1 -if $rows != 0 then +if $rows != 0 then return -1 endi - + print =============== step7 $i = 7 $mt = $mtPrefix . $i @@ -960,23 +960,23 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol6 = 6 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 4 then +if $data02 != 4 then return -1 endi -if $data03 != 5 then +if $data03 != 5 then return -1 endi -if $data04 != 6 then +if $data04 != 6 then return -1 endi - + print =============== step8 $i = 8 $mt = $mtPrefix . $i @@ -984,16 +984,16 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol5 = '5' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 4 then +if $data02 != 4 then return -1 endi -if $data03 != 5 then +if $data03 != 5 then return -1 endi if $data04 != 1 then @@ -1008,13 +1008,13 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol5 = 1 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 4.000000000 then +if $data02 != 4.000000000 then return -1 endi if $data03 != 1 then @@ -1032,22 +1032,22 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol4 = '4' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi if $data04 != 0 then return -1 endi -if $data05 != null then +if $data05 != null then return -1 endi @@ -1058,28 +1058,28 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol5 =5 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi -if $data04 != 5 then +if $data04 != 5 then return -1 endi -if $data05 != 6 then +if $data05 != 6 then return -1 endi -if $data06 != 7 then +if $data06 != 7 then return -1 endi -if $data07 != 8 then +if $data07 != 8 then return -1 endi @@ -1091,28 +1091,28 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol4 = '3' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 0 then return -1 endi -if $data03 != 1 then +if $data03 != 1 then return -1 endi -if $data04 != 5 then +if $data04 != 5 then return -1 endi -if $data05 != 4 then +if $data05 != 4 then return -1 endi -if $data06 != 3 then +if $data06 != 3 then return -1 endi -if $data07 != 2 then +if $data07 != 2 then return -1 endi @@ -1143,36 +1143,36 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol2 = '8' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 7 then +if $data02 != 7 then return -1 endi -if $data03 != 9 then +if $data03 != 9 then return -1 endi -if $data04 != 11.000000000 then +if $data04 != 11.000000000 then return -1 endi -if $data05 != 8 then +if $data05 != 8 then return -1 endi -if $data06 != 10 then +if $data06 != 10 then return -1 endi -if $data07 != 12 then +if $data07 != 12 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/create.sim b/tests/script/tsim/tag/create.sim index da683389cb5b30f3f0d8ff520fb5b92ecddf63d8..1db2251da03891031f082322afb2f2d4628ae959 100644 --- a/tests/script/tsim/tag/create.sim +++ b/tests/script/tsim/tag/create.sim @@ -25,107 +25,107 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bool) sql create table $tb using $mt tags( 1 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi - + print =============== step3 $i = 3 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol smallint) sql create table $tb using $mt tags( 1 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi - + print =============== step4 $i = 4 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol tinyint) sql create table $tb using $mt tags( 1 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi - + print =============== step5 $i = 5 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int) sql create table $tb using $mt tags( 1 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi - + print =============== step6 $i = 6 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bigint) sql create table $tb using $mt tags( 1 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi - + print =============== step7 $i = 7 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol float) sql create table $tb using $mt tags( 1 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol = 0 @@ -133,62 +133,62 @@ if $rows != 0 then print expect 0, actual: $rows return -1 endi - + print =============== step8 $i = 8 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol double) sql create table $tb using $mt tags( 1 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi - + print =============== step9 $i = 9 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol binary(10)) sql create table $tb using $mt tags( '1') -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol = '1' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol = '0' -if $rows != 0 then +if $rows != 0 then return -1 endi - + print =============== step10 $i = 10 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bool, tgcol2 bool) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 1 if $rows != 1 then print expect 1, actual: $rows return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol2 = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -198,16 +198,16 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bool, tgcol2 smallint) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol2 = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -217,16 +217,16 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bool, tgcol2 tinyint) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol2 = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -236,16 +236,16 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bool, tgcol2 int) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol2 = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -255,17 +255,17 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bool, tgcol2 bigint) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol2 = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi print =============== step15 @@ -274,16 +274,16 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bool, tgcol2 float) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol2 = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -293,16 +293,16 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bool, tgcol2 double) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol2 = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -312,16 +312,16 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bool, tgcol2 binary(10)) sql create table $tb using $mt tags( 1, '2' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol = true -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol2 = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -331,16 +331,16 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol smallint, tgcol2 tinyint) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol2 = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -350,16 +350,16 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol tinyint, tgcol2 int) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol2 = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -369,16 +369,16 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int, tgcol2 bigint) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol2 = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -388,16 +388,16 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bigint, tgcol2 float) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol2 = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -407,16 +407,16 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol float, tgcol2 double) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol2 = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -426,16 +426,16 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol double, tgcol2 binary(10)) sql create table $tb using $mt tags( 1, '2' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = '2' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol2 = 0 -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -445,51 +445,51 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 bool, tgcol3 int, tgcol4 float, tgcol5 double, tgcol6 binary(10)) sql create table $tb using $mt tags( 1, 2, 3, 4, 5, '6' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol2 = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol3 = 3 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol4 = 4 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol5 = 5 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol6 = '6' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol6 = '0' -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -499,16 +499,16 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bool, tgcol2 int, tgcol3 float, tgcol4 double, tgcol5 binary(10), tgcol6 binary(10)) sql create table $tb using $mt tags( 1, 2, 3, 4, '5', '6' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol6 = '6' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol6 = '0' -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -518,16 +518,16 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol binary(10), tgcol2 binary(10), tgcol3 binary(10), tgcol4 binary(10), tgcol5 binary(10), tgcol6 binary(10)) sql create table $tb using $mt tags( '1', '2', '3', '4', '5', '6' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol3 = '3' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi sql select * from $mt where tgcol3 = '0' -if $rows != 0 then +if $rows != 0 then return -1 endi @@ -545,12 +545,12 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol binary(250), tgcol2 binary(250)) sql create table $tb using $mt tags('1', '1') -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol = '1' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi @@ -560,12 +560,12 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol binary(25), tgcol2 binary(250)) sql create table $tb using $mt tags('1', '1') -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol = '1' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi @@ -575,7 +575,7 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol binary(250), tgcol2 binary(250), tgcol3 binary(30)) -x step30 # return -1 -step30: +step30: print =============== step31 $i = 31 @@ -584,16 +584,16 @@ $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol binary(5)) sql_error create table $tb using $mt tags('1234567') sql create table $tb using $mt tags('12345') -sql insert into $tb values(now, 1) -sql select * from $mt +sql insert into $tb values(now, 1) +sql select * from $mt print sql select * from $mt -if $rows != 1 then +if $rows != 1 then return -1 endi print $data00 $data01 $data02 -if $data02 != 12345 then +if $data02 != 12345 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/delete.sim b/tests/script/tsim/tag/delete.sim index 36ef1110f8093228f1643b15547d4f96b67ea787..acf99cc874c90170c53a393ae511c340a77d18fc 100644 --- a/tests/script/tsim/tag/delete.sim +++ b/tests/script/tsim/tag/delete.sim @@ -25,18 +25,18 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi @@ -48,38 +48,38 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 tinyint) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi sql alter table $mt drop tag tgcol2 - + print =============== step4 $i = 4 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bigint, tgcol2 float) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 < 3 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi if $data03 != 2.00000 then @@ -87,13 +87,13 @@ if $data03 != 2.00000 then endi sql describe $tb -if $data21 != BIGINT then +if $data21 != BIGINT then return -1 endi -if $data31 != FLOAT then +if $data31 != FLOAT then return -1 endi -if $data23 != TAG then +if $data23 != TAG then return -1 endi @@ -101,25 +101,25 @@ sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol1 -x step40 return -1 step40: - + print =============== step5 $i = 5 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10)) sql create table $tb using $mt tags( 1, '2' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = '2' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1.000000000 then +if $data02 != 1.000000000 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi @@ -127,111 +127,111 @@ sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol1 -x step50 return -1 step50: - + print =============== step6 $i = 6 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int, tgcol3 tinyint) sql create table $tb using $mt tags( 1, 2, 3 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol3 - + print =============== step7 $i = 7 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 tinyint, tgcol3 binary(10)) sql create table $tb using $mt tags( 1, 2, '3' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol3 = '3' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi sql describe $tb -if $data21 != SMALLINT then +if $data21 != SMALLINT then return -1 endi -if $data31 != TINYINT then +if $data31 != TINYINT then return -1 endi -if $data41 != VARCHAR then +if $data41 != VARCHAR then return -1 endi -if $data22 != 2 then +if $data22 != 2 then return -1 endi -if $data32 != 1 then +if $data32 != 1 then return -1 endi -if $data42 != 10 then +if $data42 != 10 then return -1 endi -if $data23 != TAG then +if $data23 != TAG then return -1 endi -if $data33 != TAG then +if $data33 != TAG then return -1 endi -if $data43 != TAG then +if $data43 != TAG then return -1 endi sql alter table $mt drop tag tgcol2 sql alter table $mt drop tag tgcol3 - + print =============== step8 $i = 8 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bigint, tgcol2 float, tgcol3 binary(10)) sql create table $tb using $mt tags( 1, 2, '3' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol3 = '3' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi if $data03 != 2.00000 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi @@ -244,21 +244,21 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10), tgcol3 binary(10)) sql create table $tb using $mt tags( 1, '2', '3' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = 2 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1.000000000 then +if $data02 != 1.000000000 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi @@ -271,24 +271,24 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 binary(10), tgcol3 binary(10), tgcol4 binary(10)) sql create table $tb using $mt tags( '1', '2', '3', '4' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol4 = '4' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi -if $data05 != 4 then +if $data05 != 4 then return -1 endi @@ -302,27 +302,27 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int, tgcol3 smallint, tgcol4 float, tgcol5 binary(10)) sql create table $tb using $mt tags( 1, 2, 3, 4, '5' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi if $data05 != 4.00000 then return -1 endi -if $data06 != 5 then +if $data06 != 5 then return -1 endi @@ -336,30 +336,30 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 smallint, tgcol3 float, tgcol4 double, tgcol5 binary(10), tgcol6 binary(20)) sql create table $tb using $mt tags( 1, 2, 3, 4, '5', '6' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi if $data04 != 3.00000 then return -1 endi -if $data05 != 4.000000000 then +if $data05 != 4.000000000 then return -1 endi -if $data06 != 5 then +if $data06 != 5 then return -1 endi -if $data07 != 6 then +if $data07 != 6 then return -1 endi @@ -374,30 +374,30 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 int, tgcol3 smallint, tgcol4 binary(11), tgcol5 double, tgcol6 binary(20)) sql create table $tb using $mt tags( '1', 2, 3, '4', 5, '6' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = '1' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi -if $data05 != 4 then +if $data05 != 4 then return -1 endi -if $data06 != 5.000000000 then +if $data06 != 5.000000000 then return -1 endi -if $data07 != 6 then +if $data07 != 6 then return -1 endi @@ -412,16 +412,16 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol1 = 1 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != null then +if $data03 != null then return -1 endi @@ -436,23 +436,23 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol1 = 1 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != null then +if $data03 != null then return -1 endi sql select * from $mt where tgcol2 = 1 -x step3 return -1 step3: - + print =============== step4 $i = 4 $mt = $mtPrefix . $i @@ -460,23 +460,23 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol1 = 1 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != null then +if $data03 != null then return -1 endi sql select * from $mt where tgcol2 = 1 -x step4 return -1 step4: - + print =============== step5 $i = 5 $mt = $mtPrefix . $i @@ -484,23 +484,23 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol1 = 1 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1.000000000 then +if $data02 != 1.000000000 then return -1 endi -if $data03 != null then +if $data03 != null then return -1 endi sql select * from $mt where tgcol2 = '1' -x step5 return -1 step5: - + print =============== step6 $i = 6 $mt = $mtPrefix . $i @@ -508,19 +508,19 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol1 = 1 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != null then +if $data03 != null then return -1 endi -if $data04 != null then +if $data04 != null then return -1 endi @@ -530,7 +530,7 @@ step51: sql select * from $mt where tgcol3 = 1 -x step52 return -1 step52: - + print =============== step7 $i = 7 $mt = $mtPrefix . $i @@ -538,19 +538,19 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol1 = 1 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != null then +if $data03 != null then return -1 endi -if $data04 != null then +if $data04 != null then return -1 endi @@ -560,7 +560,7 @@ step71: sql select * from $mt where tgcol3 = 1 -x step72 return -1 step72: - + print =============== step8 $i = 8 $mt = $mtPrefix . $i @@ -568,19 +568,19 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol1 = 1 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != null then +if $data03 != null then return -1 endi -if $data04 != null then +if $data04 != null then return -1 endi @@ -598,19 +598,19 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol1 = 1 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1.000000000 then +if $data02 != 1.000000000 then return -1 endi -if $data03 != null then +if $data03 != null then return -1 endi -if $data04 != null then +if $data04 != null then return -1 endi @@ -628,22 +628,22 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol1 = '1' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != null then +if $data03 != null then return -1 endi -if $data04 != null then +if $data04 != null then return -1 endi -if $data05 != null then +if $data05 != null then return -1 endi @@ -664,10 +664,10 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol4=4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then @@ -676,13 +676,13 @@ endi if $data03 != 4.00000 then return -1 endi -if $data04 != null then +if $data04 != null then return -1 endi -if $data05 != null then +if $data05 != null then return -1 endi -if $data06 != null then +if $data06 != null then return -1 endi @@ -703,28 +703,28 @@ $tb = $tbPrefix . $i sql select * from $mt where tgcol4 = 4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 4.000000000 then +if $data03 != 4.000000000 then return -1 endi -if $data04 != null then +if $data04 != null then return -1 endi -if $data05 != null then +if $data05 != null then return -1 endi -if $data06 != null then +if $data06 != null then return -1 endi -if $data07 != null then +if $data07 != null then return -1 endi @@ -749,28 +749,28 @@ $tb = $tbPrefix . $i sql reset query cache sql select * from $mt where tgcol2 = 2 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 5.000000000 then +if $data04 != 5.000000000 then return -1 endi -if $data05 != null then +if $data05 != null then return -1 endi -if $data06 != null then +if $data06 != null then return -1 endi -if $data07 != null then +if $data07 != null then return -1 endi @@ -790,7 +790,7 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 bigint) sql create table $tb using $mt tags( 1, 1 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql alter table xxmt drop tag tag1 -x step141 return -1 @@ -814,8 +814,8 @@ step145: print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/double.sim b/tests/script/tsim/tag/double.sim index 7af2f19c597b6acc1860a9d9f11b14a64a3ebfda..fbdf9733370b19eb2506801aaf61490e05ae6ed0 100644 --- a/tests/script/tsim/tag/double.sim +++ b/tests/script/tsim/tag/double.sim @@ -24,50 +24,50 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol double) $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 0 ) + sql create table $tb using $mt tags( 0 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 1 ) + sql create table $tb using $mt tags( 1 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $tb -if $rows != $rowNum then +if $rows != $rowNum then return -1 endi sql select * from $tb where ts < now + 4m -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $tb where ts <= now + 4m -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $tb where ts > now + 4m -if $rows != 15 then +if $rows != 15 then return -1 endi sql select * from $tb where ts >= now + 4m -if $rows != 15 then +if $rows != 15 then return -1 endi sql select * from $tb where ts > now + 4m and ts < now + 5m -if $rows != 1 then +if $rows != 1 then return -1 endi sql select * from $tb where ts < now + 4m and ts > now + 5m @@ -83,156 +83,156 @@ if $rows != 0 then return -1 endi sql select * from $tb where ts > now + 4m and ts > now + 5m and ts < now + 6m -if $rows != 1 then +if $rows != 1 then return -1 endi print =============== step3 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step4 sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where ts > now + 4m and tgcol = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step6 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step7 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step8 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi print =============== step9 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step10 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step11 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step12 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt partition by tgcol interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/filter.sim b/tests/script/tsim/tag/filter.sim index 9fb5f66c36e5c36ef94b3320306cf71de4958d4e..4f116cb58d1f8d2f35fbf9b7a4f58e18525fb9cc 100644 --- a/tests/script/tsim/tag/filter.sim +++ b/tests/script/tsim/tag/filter.sim @@ -24,31 +24,31 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol binary(10)) $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( '0' ) + sql create table $tb using $mt tags( '0' ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( '1' ) + sql create table $tb using $mt tags( '1' ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = '1' -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = '1' +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi @@ -62,18 +62,18 @@ sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(t step3: print =============== step4 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = '1' -if $rows != 1 then +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = '1' +if $rows != 1 then return -1 endi -if $data00 != 10 then +if $data00 != 10 then return -1 endi print =============== step5 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi @@ -96,9 +96,9 @@ sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(t step9: print =============== step10 -sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi @@ -112,14 +112,14 @@ step12: print =============== step13 sql select count(tbcol) as c from $mt group by tgcol -print $data00 -if $data00 != 100 then +print $data00 +if $data00 != 100 then return -1 endi print =============== step14 sql select count(tbcol) as c from $mt where ts > 1000 group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 if $data00 != 100 then print expect 100, actual $data00 return -1 @@ -132,16 +132,16 @@ step15: print =============== step16 sql select count(tbcol) as c from $mt where tgcol = '1' group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/float.sim b/tests/script/tsim/tag/float.sim index d1761883292c91a3e3264cb35238da896bd4239f..10fac93d5d547fb2232a26abb9c68e5baa5cd77a 100644 --- a/tests/script/tsim/tag/float.sim +++ b/tests/script/tsim/tag/float.sim @@ -24,50 +24,50 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol float) $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 0 ) + sql create table $tb using $mt tags( 0 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 1 ) + sql create table $tb using $mt tags( 1 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $tb -if $rows != $rowNum then +if $rows != $rowNum then return -1 endi sql select * from $tb where ts < now + 4m -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $tb where ts <= now + 4m -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $tb where ts > now + 4m -if $rows != 15 then +if $rows != 15 then return -1 endi sql select * from $tb where ts >= now + 4m -if $rows != 15 then +if $rows != 15 then return -1 endi sql select * from $tb where ts > now + 4m and ts < now + 5m -if $rows != 1 then +if $rows != 1 then return -1 endi sql select * from $tb where ts < now + 4m and ts > now + 5m @@ -83,156 +83,156 @@ if $rows != 0 then return -1 endi sql select * from $tb where ts > now + 4m and ts > now + 5m and ts < now + 6m -if $rows != 1 then +if $rows != 1 then return -1 endi print =============== step3 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step4 sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where ts > now + 4m and tgcol = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step6 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step7 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step8 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi print =============== step9 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step10 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step11 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step12 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt partition by tgcol interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/int.sim b/tests/script/tsim/tag/int.sim index 5a35695cbe31546629bc6d10b558fc4a0ca2d376..ac8d31db3bb181489311c9bec7b31abf1a39b305 100644 --- a/tests/script/tsim/tag/int.sim +++ b/tests/script/tsim/tag/int.sim @@ -24,50 +24,50 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int) $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 0 ) + sql create table $tb using $mt tags( 0 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 1 ) + sql create table $tb using $mt tags( 1 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $tb -if $rows != $rowNum then +if $rows != $rowNum then return -1 endi sql select * from $tb where ts < now + 4m -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $tb where ts <= now + 4m -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $tb where ts > now + 4m -if $rows != 15 then +if $rows != 15 then return -1 endi sql select * from $tb where ts >= now + 4m -if $rows != 15 then +if $rows != 15 then return -1 endi sql select * from $tb where ts > now + 4m and ts < now + 5m -if $rows != 1 then +if $rows != 1 then return -1 endi sql select * from $tb where ts < now + 4m and ts > now + 5m @@ -78,160 +78,160 @@ sql select * from $tb where ts > 100000 and ts < 100000 if $rows != 0 then return -1 endi -sql select * from $tb where ts > now + 4m and ts < now + 3m +sql select * from $tb where ts > now + 4m and ts < now + 3m if $rows != 0 then return -1 endi sql select * from $tb where ts > now + 4m and ts > now + 5m and ts < now + 6m -if $rows != 1 then +if $rows != 1 then return -1 endi print =============== step3 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step4 sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where ts > now + 4m and tgcol = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step6 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step7 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step8 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi print =============== step9 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step10 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step11 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step12 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt partition by tgcol interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/int_binary.sim b/tests/script/tsim/tag/int_binary.sim index 53058ee331b26ac7eef46d8ed2272ae90dcc686e..83a830f64a53a458dcda87209e91cab3e0d0f18f 100644 --- a/tests/script/tsim/tag/int_binary.sim +++ b/tests/script/tsim/tag/int_binary.sim @@ -24,283 +24,283 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int, tgcol2 binary(5)) $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 0, '0' ) + sql create table $tb using $mt tags( 0, '0' ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 1, '1' ) + sql create table $tb using $mt tags( 1, '1' ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step3 sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step4 sql select * from $mt where tgcol2 = '0' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> '0' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 = '1' -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> '1' -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where ts > now + 4m and tgcol = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step6 sql select * from $mt where ts > now + 4m and tgcol2 = '1' -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> '1' -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> '0' -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> '0' -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> '0' and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step7 sql select * from $mt where ts > now + 4m and tgcol2 = '1' and tgcol = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> '1' and tgcol <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = '0' and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> '0' and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = '0' and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> '0' and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> '0' and tgcol <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> '0' and ts < now + 5m and ts < now + 5m and tgcol <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step8 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step9 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol2 = '1' -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 and tgcol2 = '1' -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step10 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi print =============== step11 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step12 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol2 = '1' group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 and tgcol2 = '1' group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step13 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step14 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt partition by tgcol interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/int_float.sim b/tests/script/tsim/tag/int_float.sim index 826e1f5c084b8689f481185219e1c4ccc0c59673..009629aac97fbfb0c7dc9fe26acb848ea0ec3a49 100644 --- a/tests/script/tsim/tag/int_float.sim +++ b/tests/script/tsim/tag/int_float.sim @@ -24,299 +24,299 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int, tgcol2 float) $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 0, 0 ) + sql create table $tb using $mt tags( 0, 0 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 1, 1 ) + sql create table $tb using $mt tags( 1, 1 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step3 sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step4 sql select * from $mt where tgcol2 > 0.5 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 < 0.5 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 > 0.5 and tgcol2 < 1.5 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol2 <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where ts > now + 4m and tgcol = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step6 sql select * from $mt where ts > now + 4m and tgcol2 = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step7 sql select * from $mt where ts > now + 4m and tgcol2 = 1 and tgcol = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 1 and tgcol <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 = 0 and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol2 <> 0 and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 = 0 and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol2 <> 0 and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol2 <> 0 and tgcol <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol2 <> 0 and ts < now + 5m and ts < now + 5m and tgcol <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step8 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step9 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol2 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 and tgcol2 = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step10 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi print =============== step11 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step12 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol2 = 1 group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 and tgcol2 = 1 group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step13 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step14 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt partition by tgcol interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/set.sim b/tests/script/tsim/tag/set.sim index ebca50a3be13311fa46672012d26d30385d2ec4a..c66ae65903b324b823f1572d42c3e3aabd7bf30b 100644 --- a/tests/script/tsim/tag/set.sim +++ b/tests/script/tsim/tag/set.sim @@ -25,18 +25,18 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bool, tgcol2 int) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi @@ -50,46 +50,46 @@ sql reset query cache sql select * from $mt where tgcol1 = false print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 0 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi sql select * from $mt where tgcol2 = 4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi if $data02 != 0 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi sql describe $tb print $data21 $data23 $data32 $data33 -if $data21 != BOOL then +if $data21 != BOOL then return -1 endi -if $data31 != INT then +if $data31 != INT then return -1 endi if $data23 != TAG then return -1 endi -if $data33 != TAG then +if $data33 != TAG then return -1 endi @@ -99,18 +99,18 @@ $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 smallint, tgcol2 tinyint) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi @@ -121,58 +121,58 @@ sql reset query cache sql select * from $mt where tgcol1 = 3 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 3 then +if $data02 != 3 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi sql select * from $mt where tgcol2 = 4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 3 then +if $data02 != 3 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi sql select * from $mt where tgcol2 = 2 -if $rows != 0 then +if $rows != 0 then return -1 endi - - + + print =============== step4 $i = 4 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 bigint, tgcol2 float) sql create table $tb using $mt tags( 1, 2 ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = 1 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2.00000 then +if $data03 != 2.00000 then return -1 endi @@ -183,53 +183,53 @@ sql reset query cache sql select * from $mt where tgcol1 = 3 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 3 then +if $data02 != 3 then return -1 endi -if $data03 != 4.00000 then +if $data03 != 4.00000 then return -1 endi sql select * from $mt where tgcol2 = 4 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 3 then +if $data02 != 3 then return -1 endi -if $data03 != 4.00000 then +if $data03 != 4.00000 then return -1 endi - - + + print =============== step5 $i = 5 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10)) sql create table $tb using $mt tags( 1, '2' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol2 = '2' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1.000000000 then +if $data02 != 1.000000000 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi @@ -240,64 +240,64 @@ sql reset query cache sql select * from $mt where tgcol1 = 3 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 3.000000000 then +if $data02 != 3.000000000 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi sql select * from $mt where tgcol2 = '4' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 3.000000000 then +if $data02 != 3.000000000 then return -1 endi -if $data03 != 4 then +if $data03 != 4 then return -1 endi - + print =============== step6 $i = 6 $mt = $mtPrefix . $i $tb = $tbPrefix . $i sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 binary(10), tgcol2 int, tgcol3 smallint, tgcol4 binary(11), tgcol5 double, tgcol6 binary(20)) sql create table $tb using $mt tags( '1', 2, 3, '4', 5, '6' ) -sql insert into $tb values(now, 1) +sql insert into $tb values(now, 1) sql select * from $mt where tgcol1 = '1' -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 1 then +if $data02 != 1 then return -1 endi -if $data03 != 2 then +if $data03 != 2 then return -1 endi -if $data04 != 3 then +if $data04 != 3 then return -1 endi -if $data05 != 4 then +if $data05 != 4 then return -1 endi -if $data06 != 5.000000000 then +if $data06 != 5.000000000 then return -1 endi -if $data07 != 6 then +if $data07 != 6 then return -1 endi @@ -309,146 +309,146 @@ sql alter table $tb set tag tgcol5=10 sql alter table $tb set tag tgcol6='11' sql reset query cache - + sql select * from $mt where tgcol1 = '7' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 7 then +if $data02 != 7 then return -1 endi -if $data03 != 8 then +if $data03 != 8 then return -1 endi -if $data04 != 9 then +if $data04 != 9 then return -1 endi -if $data05 != 10.000000000 then +if $data05 != 10.000000000 then return -1 endi -if $data06 != 11 then +if $data06 != 11 then return -1 endi -if $data07 != null then +if $data07 != null then return -1 endi sql select * from $mt where tgcol2 = 8 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 7 then +if $data02 != 7 then return -1 endi -if $data03 != 8 then +if $data03 != 8 then return -1 endi -if $data04 != 9 then +if $data04 != 9 then return -1 endi -if $data05 != 10.000000000 then +if $data05 != 10.000000000 then return -1 endi -if $data06 != 11 then +if $data06 != 11 then return -1 endi -if $data07 != null then +if $data07 != null then return -1 endi sql select * from $mt where tgcol4 = '9' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 7 then +if $data02 != 7 then return -1 endi -if $data03 != 8 then +if $data03 != 8 then return -1 endi -if $data04 != 9 then +if $data04 != 9 then return -1 endi -if $data05 != 10.000000000 then +if $data05 != 10.000000000 then return -1 endi -if $data06 != 11 then +if $data06 != 11 then return -1 endi -if $data07 != null then +if $data07 != null then return -1 endi sql select * from $mt where tgcol5 = 10 print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 7 then +if $data02 != 7 then return -1 endi -if $data03 != 8 then +if $data03 != 8 then return -1 endi -if $data04 != 9 then +if $data04 != 9 then return -1 endi -if $data05 != 10.000000000 then +if $data05 != 10.000000000 then return -1 endi -if $data06 != 11 then +if $data06 != 11 then return -1 endi -if $data07 != null then +if $data07 != null then return -1 endi sql select * from $mt where tgcol6 = '11' print $data01 $data02 $data03 -if $rows != 1 then +if $rows != 1 then return -1 endi -if $data01 != 1 then +if $data01 != 1 then return -1 endi -if $data02 != 7 then +if $data02 != 7 then return -1 endi -if $data03 != 8 then +if $data03 != 8 then return -1 endi -if $data04 != 9 then +if $data04 != 9 then return -1 endi -if $data05 != 10.000000000 then +if $data05 != 10.000000000 then return -1 endi -if $data06 != 11 then +if $data06 != 11 then return -1 endi -if $data07 != null then +if $data07 != null then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi diff --git a/tests/script/tsim/tag/smallint.sim b/tests/script/tsim/tag/smallint.sim index 9fb3ca142646f2ea4de7c7892f2e38987d155596..e3a819c837761a2ffddbc8d3cfb7f77c2d785d06 100644 --- a/tests/script/tsim/tag/smallint.sim +++ b/tests/script/tsim/tag/smallint.sim @@ -24,214 +24,214 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol smallint) $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 0 ) + sql create table $tb using $mt tags( 0 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 1 ) + sql create table $tb using $mt tags( 1 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $tb -if $rows != $rowNum then +if $rows != $rowNum then return -1 endi sql select * from $tb where ts < now + 4m -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $tb where ts <= now + 4m -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $tb where ts > now + 4m -if $rows != 15 then +if $rows != 15 then return -1 endi sql select * from $tb where ts >= now + 4m -if $rows != 15 then +if $rows != 15 then return -1 endi sql select * from $tb where ts > now + 4m and ts < now + 5m -if $rows != 1 then +if $rows != 1 then return -1 endi sql select * from $tb where ts < now + 4m and ts > now + 5m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts > 100000 and ts < 100000 -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts > now + 4m and ts < now + 3m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $tb where ts > now + 4m and ts > now + 5m and ts < now + 6m -if $rows != 1 then +if $rows != 1 then return -1 endi print =============== step3 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step4 sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where ts > now + 4m and tgcol = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step6 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step7 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step8 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi print =============== step9 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step10 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step11 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step12 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt partition by tgcol interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/tag/tinyint.sim b/tests/script/tsim/tag/tinyint.sim index 11cd6ee3b2b8ec5e5defbc65e4abed6fe0fc32c7..8560def34cc22cc73a592ac062d5bb8f9867b8c0 100644 --- a/tests/script/tsim/tag/tinyint.sim +++ b/tests/script/tsim/tag/tinyint.sim @@ -24,214 +24,214 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol tinyint) $i = 0 while $i < 5 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 0 ) + sql create table $tb using $mt tags( 0 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw while $i < 10 $tb = $tbPrefix . $i - sql create table $tb using $mt tags( 1 ) + sql create table $tb using $mt tags( 1 ) $x = 0 while $x < $rowNum - $ms = $x . m - sql insert into $tb values (now + $ms , $x ) + $ms = $x . m + sql insert into $tb values (now + $ms , $x ) $x = $x + 1 - endw + endw $i = $i + 1 -endw +endw print =============== step2 sql select * from $tb -if $rows != $rowNum then +if $rows != $rowNum then return -1 endi sql select * from $tb where ts < now + 4m -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $tb where ts <= now + 4m -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $tb where ts > now + 4m -if $rows != 15 then +if $rows != 15 then return -1 endi sql select * from $tb where ts >= now + 4m -if $rows != 15 then +if $rows != 15 then return -1 endi sql select * from $tb where ts > now + 4m and ts < now + 5m -if $rows != 1 then +if $rows != 1 then return -1 endi sql select * from $tb where ts < now + 4m and ts > now + 5m if $rows != 0 then return -1 endi -sql select * from $tb where ts > 100000 and ts < 100000 +sql select * from $tb where ts > 100000 and ts < 100000 if $rows != 0 then return -1 endi -sql select * from $tb where ts > now + 4m and ts < now + 3m +sql select * from $tb where ts > now + 4m and ts < now + 3m if $rows != 0 then return -1 endi sql select * from $tb where ts > now + 4m and ts > now + 5m and ts < now + 6m -if $rows != 1 then +if $rows != 1 then return -1 endi print =============== step3 sql select * from $mt -if $rows != $totalNum then +if $rows != $totalNum then return -1 endi sql select * from $mt where ts < now + 4m -if $rows != 50 then +if $rows != 50 then return -1 endi sql select * from $mt where ts > now + 4m -if $rows != 150 then +if $rows != 150 then return -1 endi sql select * from $mt where ts = now + 4m -if $rows != 0 then +if $rows != 0 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m -if $rows != 10 then +if $rows != 10 then return -1 endi print =============== step4 sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 1 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 0 -if $rows != 100 then +if $rows != 100 then return -1 endi sql select * from $mt where tgcol <> 0 -if $rows != 100 then +if $rows != 100 then return -1 endi print =============== step5 sql select * from $mt where ts > now + 4m and tgcol = 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> 1 -if $rows != 75 then +if $rows != 75 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts < now + 4m and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol = 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts <= now + 4m and tgcol <> 0 -if $rows != 25 then +if $rows != 25 then return -1 endi sql select * from $mt where ts > now + 4m and ts < now + 5m and tgcol <> 0 -if $rows != 5 then +if $rows != 5 then return -1 endi sql select * from $mt where ts > now + 4m and tgcol <> 0 and ts < now + 5m -if $rows != 5 then +if $rows != 5 then return -1 endi print =============== step6 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 200 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then return -1 endi print =============== step7 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step8 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 50 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 50 then return -1 endi print =============== step9 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step10 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = 1 group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== step11 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m group by tgcol -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 25 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then return -1 endi print =============== step12 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt partition by tgcol interval(1d) -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 100 then return -1 endi print =============== clear sql drop database $db sql select * from information_schema.ins_databases -if $rows != 2 then +if $rows != 2 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/user/privilege_sysinfo.sim b/tests/script/tsim/user/privilege_sysinfo.sim index 25c1a84db699a8cdef5678abaf728f4a93690bde..86f95755d09f7e2f1c0654b94f63f471f9a074d0 100644 --- a/tests/script/tsim/user/privilege_sysinfo.sim +++ b/tests/script/tsim/user/privilege_sysinfo.sim @@ -8,7 +8,20 @@ sql create user sysinfo0 pass 'taosdata' sql create user sysinfo1 pass 'taosdata' sql alter user sysinfo0 sysinfo 0 sql alter user sysinfo1 sysinfo 1 + sql create database db +sql use db +sql create table db.stb (ts timestamp, i int) tags (t int) +sql create table db.ctb using db.stb tags (1) +sql create table db.ntb (ts timestamp, i int) +sql insert into db.ctb values (now, 1); +sql insert into db.ntb values (now, 1); +sql select * from db.stb +sql select * from db.ctb +sql select * from db.ntb + +sql create database d2 +sql GRANT all ON d2.* to sysinfo0; print user sysinfo0 login sql close @@ -17,11 +30,31 @@ sql connect sysinfo0 print =============== check oper sql_error create user u1 pass 'u1' sql_error drop user sysinfo1 -sql_error alter user sysinfo1 pass '1' sql_error alter user sysinfo0 pass '1' +sql_error alter user sysinfo0 enable 0 +sql_error alter user sysinfo0 enable 1 +sql_error alter user sysinfo1 pass '1' +sql_error alter user sysinfo1 enable 1 +sql_error alter user sysinfo1 enable 1 +sql_error GRANT read ON db.* to sysinfo0; +sql_error GRANT read ON *.* to sysinfo0; +sql_error REVOKE read ON db.* from sysinfo0; +sql_error REVOKE read ON *.* from sysinfo0; +sql_error GRANT write ON db.* to sysinfo0; +sql_error GRANT write ON *.* to sysinfo0; +sql_error REVOKE write ON db.* from sysinfo0; +sql_error REVOKE write ON *.* from sysinfo0; +sql_error REVOKE write ON *.* from sysinfo0; sql_error create dnode $hostname port 7200 sql_error drop dnode 1 +sql_error alter dnode 1 'debugFlag 135' +sql_error alter dnode 1 'dDebugFlag 131' +sql_error alter dnode 1 'resetlog' +sql_error alter dnode 1 'monitor' '1' +sql_error alter dnode 1 'monitor' '0' +sql_error alter dnode 1 'monitor 1' +sql_error alter dnode 1 'monitor 0' sql_error create qnode on dnode 1 sql_error drop qnode on dnode 1 @@ -44,20 +77,106 @@ sql_error create database d1 sql_error drop database db sql_error use db sql_error alter database db replica 1; +sql_error alter database db keep 21 sql_error show db.vgroups -sql select * from information_schema.ins_stables where db_name = 'db' -sql select * from information_schema.ins_tables where db_name = 'db' + +sql_error create table db.stb1 (ts timestamp, i int) tags (t int) +sql_error create table db.ctb1 using db.stb1 tags (1) +sql_error create table db.ntb1 (ts timestamp, i int) +sql_error insert into db.ctb values (now, 1); +sql_error insert into db.ntb values (now, 1); +sql_error select * from db.stb +sql_error select * from db.ctb +sql_error select * from db.ntb + +sql use d2 +sql create table d2.stb2 (ts timestamp, i int) tags (t int) +sql create table d2.ctb2 using d2.stb2 tags (1) +sql create table d2.ntb2 (ts timestamp, i int) +sql insert into d2.ctb2 values (now, 1); +sql insert into d2.ntb2 values (now, 1); +sql select * from d2.stb2 +sql select * from d2.ctb2 +sql select * from d2.ntb2 print =============== check show -sql select * from information_schema.ins_users +sql_error show users sql_error show cluster -sql select * from information_schema.ins_dnodes -sql select * from information_schema.ins_mnodes +sql_error select * from information_schema.ins_dnodes +sql_error select * from information_schema.ins_mnodes sql_error show snodes -sql select * from information_schema.ins_qnodes +sql_error select * from information_schema.ins_qnodes +sql_error show dnodes +sql_error show snodes +sql_error show qnodes +sql_error show mnodes sql_error show bnodes +sql_error show db.vgroups +sql_error show db.stables +sql_error show db.tables +sql_error show indexes from stb from db +sql show databases +sql_error show d2.vgroups +sql show d2.stables +sql show d2.tables +sql show indexes from stb2 from d2 +#sql_error show create database db +sql_error show create table db.stb; +sql_error show create table db.ctb; +sql_error show create table db.ntb; +sql show streams +sql show consumers +sql show topics +sql show subscriptions +sql show functions sql_error show grants +sql show queries +sql show connections +sql show apps +sql show transactions +sql_error show create database d2 +sql show create table d2.stb2; +sql show create table d2.ctb2; +sql show create table d2.ntb2; +sql_error show variables; +sql show local variables; sql_error show dnode 1 variables; -sql show variables; +sql_error show variables; + + +print =============== check information_schema +sql show databases +if $rows != 3 then + return -1 +endi + +sql use information_schema; +sql_error select * from information_schema.ins_dnodes +sql_error select * from information_schema.ins_mnodes +sql_error select * from information_schema.ins_modules +sql_error select * from information_schema.ins_qnodes +sql_error select * from information_schema.ins_cluster +sql select * from information_schema.ins_databases +sql select * from information_schema.ins_functions +sql select * from information_schema.ins_indexes +sql select * from information_schema.ins_stables +sql select * from information_schema.ins_tables +sql select * from information_schema.ins_tags +sql select * from information_schema.ins_users +sql select * from information_schema.ins_topics +sql select * from information_schema.ins_subscriptions +sql select * from information_schema.ins_streams +sql_error select * from information_schema.ins_grants +sql_error select * from information_schema.ins_vgroups +sql_error select * from information_schema.ins_configs +sql_error select * from information_schema.ins_dnode_variables + +print =============== check performance_schema +sql use performance_schema; +sql select * from performance_schema.perf_connections +sql select * from performance_schema.perf_queries +sql select * from performance_schema.perf_consumers +sql select * from performance_schema.perf_trans +sql select * from performance_schema.perf_apps -system sh/exec.sh -n dnode1 -s stop -x SIGINT +#system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/valgrind/checkError6.sim b/tests/script/tsim/valgrind/checkError6.sim index 00de00f71d06810e9d2a72f2b8d06bad5aa42266..d85a1bebc898ca79a20e4c495081077a2b1a4249 100644 --- a/tests/script/tsim/valgrind/checkError6.sim +++ b/tests/script/tsim/valgrind/checkError6.sim @@ -67,17 +67,17 @@ sql select diff(tbcol) from tb1 where tbcol > 5 and tbcol < 20 order by ts sql select first(tbcol), last(tbcol) as b from tb1 where ts <= 1601481840000 interval(1m) sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from tb1 where ts <= 1601481840000 partition by tgcol order by tgcol sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from tb1 where ts <= 1601481840000 partition by tgcol interval(1m) -sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) +sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0) sql select last_row(*) from tb1 where tbcol > 5 and tbcol < 20 sql select _wstart, _wend, _wduration, _qstart, _qend, count(*) from tb1 interval(10s, 2s) sliding(10s) -sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) -sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) order by tgcol desc +sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0) +sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0) order by tgcol desc sql select log(tbcol), abs(tbcol), pow(tbcol, 2), sqrt(tbcol), sin(tbcol), cos(tbcol), tan(tbcol), asin(tbcol), acos(tbcol), atan(tbcol), ceil(tbcol), floor(tbcol), round(tbcol), atan(tbcol) from tb1 sql select length("abcd1234"), char_length("abcd1234=-+*") from tb1 sql select tbcol4, length(tbcol4), lower(tbcol4), upper(tbcol4), ltrim(tbcol4), rtrim(tbcol4), concat(tbcol4, tbcol5), concat_ws('_', tbcol4, tbcol5), substr(tbcol4, 1, 4) from tb1 sql select * from tb1 where tbcol not in (1,2,3,null); sql select * from tb1 where tbcol + 3 <> null; -sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) +sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0) sql select tbcol5 - tbcol3 from tb1 print =============== step4: stb @@ -97,8 +97,8 @@ sql select first(tbcol), last(tbcol) as c from stb group by tgcol sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 and tbcol2 is null partition by tgcol interval(1m) sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m) sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from stb where ts <= 1601481840000 partition by tgcol interval(1m) -sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) -sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 and tgcol = 1 partition by tgcol interval(1m) fill(value, 0) order by tgcol desc +sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0) +sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 and tgcol = 1 partition by tgcol interval(1m) fill(value, 0,0,0,0,0) order by tgcol desc sql select last_row(tbcol), stddev(tbcol) from stb where tbcol > 5 and tbcol < 20 group by tgcol sql select _wstart, _wend, _wduration, _qstart, _qend, count(*) from stb interval(10s, 2s) sliding(10s) sql select log(tbcol), abs(tbcol), pow(tbcol, 2), sqrt(tbcol), sin(tbcol), cos(tbcol), tan(tbcol), asin(tbcol), acos(tbcol), atan(tbcol), ceil(tbcol), floor(tbcol), round(tbcol), atan(tbcol) from stb @@ -108,14 +108,14 @@ sql select * from stb where tbcol not in (1,2,3,null); sql select * from stb where tbcol + 3 <> null; sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from stb where tbcol = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol interval(1d) sql select _wstart, count(*) from tb1 session(ts, 1m) -sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) +sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0) sql select tbcol5 - tbcol3 from stb sql select spread( tbcol2 )/44, spread(tbcol2), 0.204545455 * 44 from stb; sql select min(tbcol) * max(tbcol) /4, sum(tbcol2) * apercentile(tbcol2, 20), apercentile(tbcol2, 33) + 52/9 from stb; sql select distinct(tbname), tgcol from stb; -#sql select sum(tbcol) from stb partition by tbname interval(1s) slimit 1 soffset 1; -#sql select sum(tbcol) from stb partition by tbname interval(1s) slimit 2 soffset 4 limit 10 offset 1; +sql select sum(tbcol) from stb partition by tbname interval(1s) slimit 1 soffset 1; +sql select sum(tbcol) from stb partition by tbname interval(1s) slimit 2 soffset 4 limit 10 offset 1; print =============== step5: explain sql explain analyze select ts from stb where -2; @@ -127,8 +127,8 @@ sql explain analyze select count(*),sum(tbcol) from stb; sql explain analyze select count(*),sum(tbcol) from stb group by tbcol; sql explain analyze select * from information_schema.ins_stables; sql explain analyze verbose true select * from information_schema.ins_stables where db_name='db2'; -sql explain analyze verbose true select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) -sql explain select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) +sql explain analyze verbose true select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0) +sql explain select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0) print =============== step6: in cast sql select 1+1n; diff --git a/tests/script/tsim/valgrind/checkError7.sim b/tests/script/tsim/valgrind/checkError7.sim index a66ddb30df063416e0f04da0dd58de9bebed186d..af42d1e76b50bc07e9c7a484bd24c9544517f980 100644 --- a/tests/script/tsim/valgrind/checkError7.sim +++ b/tests/script/tsim/valgrind/checkError7.sim @@ -66,7 +66,7 @@ $null= system_content sh/checkValgrind.sh -n dnode1 print cmd return result ----> [ $system_content ] -if $system_content > 2 then +if $system_content > 0 then return -1 endi diff --git a/tests/script/tsim/valgrind/checkError8.sim b/tests/script/tsim/valgrind/checkError8.sim index 7ca01bc3d04a489e389939221b88b9aa9432e939..2f204768eb1fc8922a07853155edfc29e97c3975 100644 --- a/tests/script/tsim/valgrind/checkError8.sim +++ b/tests/script/tsim/valgrind/checkError8.sim @@ -143,7 +143,7 @@ $null= system_content sh/checkValgrind.sh -n dnode1 print cmd return result ----> [ $system_content ] -if $system_content > 2 then +if $system_content > 0 then return -1 endi diff --git a/tests/system-test/1-insert/influxdb_line_taosc_insert.py b/tests/system-test/1-insert/influxdb_line_taosc_insert.py index 25e2378f4611aea030011ed29ecce6b9b96cad84..cae4294bc90c16ad3fed032eff610f5b943d789e 100644 --- a/tests/system-test/1-insert/influxdb_line_taosc_insert.py +++ b/tests/system-test/1-insert/influxdb_line_taosc_insert.py @@ -31,7 +31,7 @@ if platform.system().lower() == 'windows': class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) + tdSql.init(conn.cursor(), False) self._conn = conn def createDb(self, name="test", db_update_tag=0): @@ -357,7 +357,7 @@ class TDTestCase: """ normal tags and cols, one for every elm """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) @@ -365,7 +365,7 @@ class TDTestCase: """ check all normal type """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"] for t_type in full_type_list: input_sql, stb_name = self.genFullTypeSql(c0=t_type, t0=t_type) @@ -379,7 +379,7 @@ class TDTestCase: please test : binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"' ''' - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"' nchar_symbols = f'L{binary_symbols}' input_sql, stb_name = self.genFullTypeSql(c7=binary_symbols, c8=nchar_symbols, t7=binary_symbols, t8=nchar_symbols) @@ -390,7 +390,7 @@ class TDTestCase: test ts list --> ["1626006833639000000", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022"] # ! us级时间戳都为0时,数据库中查询显示,但python接口拿到的结果不显示 .000000的情况请确认,目前修改时间处理代码可以通过 """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") ts_list = ["1626006833639000000", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022", 0] for ts in ts_list: input_sql, stb_name = self.genFullTypeSql(ts=ts) @@ -401,7 +401,7 @@ class TDTestCase: check id.index in tags eg: t0=**,id=**,t1=** """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(id_change_tag=True) self.resCmp(input_sql, stb_name) @@ -410,7 +410,7 @@ class TDTestCase: check id param eg: id and ID """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True) self.resCmp(input_sql, stb_name) input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, id_upper_tag=True) @@ -420,7 +420,7 @@ class TDTestCase: """ id not exist """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True) self.resCmp(input_sql, stb_name) query_sql = f"select tbname from {stb_name}" @@ -436,10 +436,10 @@ class TDTestCase: max col count is ?? """ for input_sql in [self.genLongSql(127, 1)[0], self.genLongSql(1, 4093)[0]]: - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) for input_sql in [self.genLongSql(129, 1)[0], self.genLongSql(1, 4095)[0]]: - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) except SchemalessError as err: @@ -450,7 +450,7 @@ class TDTestCase: test illegal id name mix "~!@#$¥%^&*()-+|[]、「」【】;:《》<>?" """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") rstr = list("~!@#$¥%^&*()-+|[]、「」【】;:《》<>?") for i in rstr: stb_name=f"aaa{i}bbb" @@ -462,7 +462,7 @@ class TDTestCase: """ id is start with num """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(tb_name=f"\"1aaabbb\"")[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) @@ -473,7 +473,7 @@ class TDTestCase: """ check now unsupported """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(ts="now")[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) @@ -484,7 +484,7 @@ class TDTestCase: """ check date format ts unsupported """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) @@ -495,7 +495,7 @@ class TDTestCase: """ check ts format like 16260068336390us19 """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(ts="16260068336390us19")[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) @@ -506,7 +506,7 @@ class TDTestCase: """ check full type tag value limit """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # i8 for t1 in ["-128i8", "127i8"]: input_sql, stb_name = self.genFullTypeSql(t1=t1) @@ -602,7 +602,7 @@ class TDTestCase: """ check full type col value limit """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # i8 for c1 in ["-128i8", "127i8"]: input_sql, stb_name = self.genFullTypeSql(c1=c1) @@ -699,7 +699,7 @@ class TDTestCase: """ test illegal tag col value """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # bool for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]: input_sql1 = self.genFullTypeSql(t0=i)[0] @@ -758,7 +758,7 @@ class TDTestCase: """ check duplicate Id Tag Col """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql_id = self.genFullTypeSql(id_double_tag=True)[0] try: self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) @@ -792,7 +792,7 @@ class TDTestCase: """ case no id when stb exist """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", c0="f") self.resCmp(input_sql, stb_name) input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", c0="f") @@ -805,7 +805,7 @@ class TDTestCase: """ check duplicate insert when stb exist """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) @@ -816,7 +816,7 @@ class TDTestCase: """ check length increase """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) tb_name = tdCom.getLongName(5, "letters") @@ -833,7 +833,7 @@ class TDTestCase: * col is added without value when update==0 * col is added with value when update==1 """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") for db_update_tag in [0, 1]: if db_update_tag == 1 : @@ -850,7 +850,7 @@ class TDTestCase: """ check column and tag count add """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", c0="f") self.resCmp(input_sql, stb_name) @@ -866,7 +866,7 @@ class TDTestCase: condition: stb not change insert two table, keep tag unchange, change col """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(t0="f", c0="f", id_noexist_tag=True) self.resCmp(input_sql, stb_name) tb_name1 = self.getNoIdTbName(stb_name) @@ -888,7 +888,7 @@ class TDTestCase: """ every binary and nchar must be length+2 """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(7, "letters") tb_name = f'{stb_name}_1' input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000' @@ -928,7 +928,7 @@ class TDTestCase: """ check nchar length limit """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(7, "letters") tb_name = f'{stb_name}_1' input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000' @@ -963,7 +963,7 @@ class TDTestCase: """ test batch insert """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(8, "letters") # tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000", @@ -982,7 +982,7 @@ class TDTestCase: """ test multi insert """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") sql_list = [] stb_name = tdCom.getLongName(8, "letters") # tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') @@ -996,7 +996,7 @@ class TDTestCase: """ test batch error insert """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(8, "letters") lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000", f"{stb_name},t2=5f64,t3=L\"ste\" c1=tRue,c2=4i64,c3=\"iam\" 1626056811823316532ns"] @@ -1068,7 +1068,7 @@ class TDTestCase: """ thread input different stb """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genSqlList()[0] self.multiThreadRun(self.genMultiThreadSeq(input_sql)) tdSql.query(f"show tables;") @@ -1078,7 +1078,7 @@ class TDTestCase: """ thread input same stb tb, different data, result keep first data """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) self.resCmp(input_sql, stb_name) @@ -1095,7 +1095,7 @@ class TDTestCase: """ thread input same stb tb, different data, add columes and tags, result keep first data """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) self.resCmp(input_sql, stb_name) @@ -1112,7 +1112,7 @@ class TDTestCase: """ thread input same stb tb, different data, minus columes and tags, result keep first data """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) self.resCmp(input_sql, stb_name) @@ -1129,7 +1129,7 @@ class TDTestCase: """ thread input same stb, different tb, different data """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4] @@ -1144,7 +1144,7 @@ class TDTestCase: """ thread input same stb, different tb, different data, add col, mul tag """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) s_stb_d_tb_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[5] @@ -1159,7 +1159,7 @@ class TDTestCase: """ thread input same stb, different tb, different data, add tag, mul col """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) s_stb_d_tb_a_tag_m_col_list = self.genSqlList(stb_name=stb_name)[6] @@ -1171,7 +1171,7 @@ class TDTestCase: """ thread input same stb tb, different ts """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) self.resCmp(input_sql, stb_name) @@ -1186,7 +1186,7 @@ class TDTestCase: """ thread input same stb tb, different ts, add col, mul tag """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) self.resCmp(input_sql, stb_name) @@ -1205,7 +1205,7 @@ class TDTestCase: """ thread input same stb tb, different ts, add tag, mul col """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) self.resCmp(input_sql, stb_name) @@ -1226,7 +1226,7 @@ class TDTestCase: """ thread input same stb, different tb, data, ts """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10] @@ -1241,7 +1241,7 @@ class TDTestCase: """ thread input same stb, different tb, data, ts, add col, mul tag """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) s_stb_d_tb_d_ts_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[11] diff --git a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py index 003abe9d10346f9b7cce1dbdb6f6f0ed73e3ea55..3b01784000b74c1f6bb072f24e8be36e99d37f4f 100644 --- a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py +++ b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py @@ -459,7 +459,7 @@ class TDTestCase: normal tags and cols, one for every elm """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(value_type=value_type) self.resCmp(input_json, stb_name) @@ -468,7 +468,7 @@ class TDTestCase: check all normal type """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"] for t_type in full_type_list: input_json_list = [self.genFullTypeJson(tag_value=self.genTagValue(t0_value=t_type))[0], @@ -489,7 +489,7 @@ class TDTestCase: binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"' ''' tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"' nchar_symbols = binary_symbols input_sql1, stb_name1 = self.genFullTypeJson(col_value=self.genTsColValue(value=binary_symbols, t_type="binary", value_type=value_type), @@ -505,7 +505,7 @@ class TDTestCase: # ! us级时间戳都为0时,数据库中查询显示,但python接口拿到的结果不显示 .000000的情况请确认,目前修改时间处理代码可以通过 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") ts_list = ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006834", 0] for ts in ts_list: if "s" in str(ts): @@ -571,7 +571,7 @@ class TDTestCase: eg: t0=**,id=**,t1=** """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(id_change_tag=True, value_type=value_type) self.resCmp(input_json, stb_name) @@ -581,7 +581,7 @@ class TDTestCase: eg: id and ID """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(id_upper_tag=True, value_type=value_type) self.resCmp(input_json, stb_name) input_json, stb_name = self.genFullTypeJson(id_mixul_tag=True, value_type=value_type) @@ -594,7 +594,7 @@ class TDTestCase: id not exist """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(id_noexist_tag=True, value_type=value_type) self.resCmp(input_json, stb_name) query_sql = f"select tbname from {stb_name}" @@ -610,10 +610,10 @@ class TDTestCase: """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') for input_json in [self.genLongJson(128, value_type)[0]]: - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) for input_json in [self.genLongJson(129, value_type)[0]]: - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) except SchemalessError as err: @@ -625,7 +625,7 @@ class TDTestCase: mix "`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?" """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") rstr = list("`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?") for i in rstr: input_json = self.genFullTypeJson(tb_name=f'aa{i}bb', value_type=value_type)[0] @@ -639,7 +639,7 @@ class TDTestCase: id is start with num """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(tb_name="1aaabbb", value_type=value_type)[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -651,7 +651,7 @@ class TDTestCase: check now unsupported """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="now", t_type="ns", value_type=value_type))[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -663,7 +663,7 @@ class TDTestCase: check date format ts unsupported """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="2021-07-21\ 19:01:46.920", t_type="ns", value_type=value_type))[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -675,7 +675,7 @@ class TDTestCase: check ts format like 16260068336390us19 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="16260068336390us19", t_type="us", value_type=value_type))[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -690,7 +690,7 @@ class TDTestCase: length of stb_name tb_name <= 192 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tdSql.execute('reset query cache') stb_name_192 = tdCom.getLongName(len=192, mode="letters") tb_name_192 = tdCom.getLongName(len=192, mode="letters") @@ -715,7 +715,7 @@ class TDTestCase: check tag name limit <= 62 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tag_name = tdCom.getLongName(61, "letters") tag_name = f't{tag_name}' stb_name = tdCom.getLongName(7, "letters") @@ -733,7 +733,7 @@ class TDTestCase: check full type tag value limit """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # i8 for t1 in [-127, 127]: input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t1_value=t1, value_type=value_type)) @@ -854,12 +854,12 @@ class TDTestCase: check full type col value limit """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # i8 for value in [-128, 127]: input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="tinyint", value_type=value_type)) self.resCmp(input_json, stb_name) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-129, 128]: input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="tinyint"))[0] try: @@ -868,11 +868,11 @@ class TDTestCase: except SchemalessError as err: tdSql.checkNotEqual(err.errno, 0) # i16 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-32768]: input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="smallint", value_type=value_type)) self.resCmp(input_json, stb_name) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-32769, 32768]: input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="smallint"))[0] try: @@ -882,11 +882,11 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # i32 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-2147483648]: input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="int", value_type=value_type)) self.resCmp(input_json, stb_name) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-2147483649, 2147483648]: input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="int"))[0] try: @@ -896,12 +896,12 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # i64 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-9223372036854775808]: input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint", value_type=value_type)) self.resCmp(input_json, stb_name) # ! bug - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # for value in [-9223372036854775809, 9223372036854775808]: # print(value) # input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint"))[0] @@ -913,12 +913,12 @@ class TDTestCase: # tdSql.checkNotEqual(err.errno, 0) # f32 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-3.4028234663852885981170418348451692544*(10**38), 3.4028234663852885981170418348451692544*(10**38)]: input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float", value_type=value_type)) self.resCmp(input_json, stb_name) # * limit set to 4028234664*(10**38) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-3.4028234664*(10**38), 3.4028234664*(10**38)]: input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float"))[0] try: @@ -928,12 +928,12 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # f64 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308), -1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)]: input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type)) self.resCmp(input_json, stb_name) # * limit set to 1.797693134862316*(10**308) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [-1.797693134862316*(10**308), -1.797693134862316*(10**308)]: input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type))[0] try: @@ -944,12 +944,12 @@ class TDTestCase: # if value_type == "obj": # # binary - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # stb_name = tdCom.getLongName(7, "letters") # input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16374, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}} # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16375, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}} # try: # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -959,12 +959,12 @@ class TDTestCase: # # nchar # # * legal nchar could not be larger than 16374/4 - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # stb_name = tdCom.getLongName(7, "letters") # input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(4093, "letters"), 'type': 'nchar'}, "tags": {"t0": {'value': True, 'type': 'bool'}}} # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(4094, "letters"), 'type': 'nchar'}, "tags": {"t0": {'value': True, 'type': 'bool'}}} # try: # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -973,14 +973,14 @@ class TDTestCase: # tdSql.checkNotEqual(err.errno, 0) # elif value_type == "default": # # binary - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # stb_name = tdCom.getLongName(7, "letters") # if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary": # input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(16374, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}} # elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar": # input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(4093, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}} # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary": # input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(16375, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}} # elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar": @@ -997,7 +997,7 @@ class TDTestCase: test illegal tag col value """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # bool for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]: try: @@ -1046,7 +1046,7 @@ class TDTestCase: check duplicate Id Tag Col """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(id_double_tag=True, value_type=value_type)[0] print(input_json) try: @@ -1068,7 +1068,7 @@ class TDTestCase: case no id when stb exist """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(tb_name="sub_table_0123456", col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type)) self.resCmp(input_json, stb_name) input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, id_noexist_tag=True, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type)) @@ -1081,7 +1081,7 @@ class TDTestCase: check duplicate insert when stb exist """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(value_type=value_type) self.resCmp(input_json, stb_name) self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -1091,7 +1091,7 @@ class TDTestCase: """ check length increase """ - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(value_type=value_type) self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) self.resCmp(input_json, stb_name) @@ -1105,7 +1105,7 @@ class TDTestCase: check length increase """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = "test_crash" input_json = self.genFullTypeJson(stb_name=stb_name)[0] self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -1128,7 +1128,7 @@ class TDTestCase: * col is added with value when update==1 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") for db_update_tag in [0, 1]: if db_update_tag == 1 : @@ -1154,7 +1154,7 @@ class TDTestCase: check tag count add """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type)) self.resCmp(input_json, stb_name) @@ -1171,7 +1171,7 @@ class TDTestCase: insert two table, keep tag unchange, change col """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), id_noexist_tag=True) self.resCmp(input_json, stb_name) tb_name1 = self.getNoIdTbName(stb_name) @@ -1194,7 +1194,7 @@ class TDTestCase: every binary and nchar must be length+2 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(7, "letters") tb_name = f'{stb_name}_1' tag_value = {"t0": {"value": True, "type": "bool"}} @@ -1240,7 +1240,7 @@ class TDTestCase: check nchar length limit """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(7, "letters") tb_name = f'{stb_name}_1' tag_value = {"t0": True} @@ -1284,7 +1284,7 @@ class TDTestCase: test batch insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = "stb_name" tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": {"value": 1, "type": "bigint"}, "tags": {"t1": {"value": 3, "type": "bigint"}, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}}, @@ -1319,7 +1319,7 @@ class TDTestCase: test multi insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") sql_list = list() stb_name = tdCom.getLongName(8, "letters") tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') @@ -1335,7 +1335,7 @@ class TDTestCase: test batch error insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": {"value": "tt", "type": "bool"}, "tags": {"t1": {"value": 3, "type": "bigint"}, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}}, {"metric": "st123456", "timestamp": {"value": 1626006933641000000, "type": "ns"}, "value": {"value": 9, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}] try: @@ -1349,7 +1349,7 @@ class TDTestCase: test multi cols insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(c_multi_tag=True, value_type=value_type)[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -1362,7 +1362,7 @@ class TDTestCase: test blank col insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(c_blank_tag=True, value_type=value_type)[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -1375,7 +1375,7 @@ class TDTestCase: test blank tag insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(t_blank_tag=True, value_type=value_type)[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -1388,7 +1388,7 @@ class TDTestCase: check nchar ---> chinese """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(chinese_tag=True) self.resCmp(input_json, stb_name) @@ -1397,7 +1397,7 @@ class TDTestCase: multi_field ''' tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(multi_field_tag=True, value_type=value_type)[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) @@ -1407,7 +1407,7 @@ class TDTestCase: def spellCheckCase(self): tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(8, "letters") input_json_list = [{"metric": f'{stb_name}_1', "timestamp": {"value": 1626006833639000000, "type": "Ns"}, "value": {"value": 1, "type": "Bigint"}, "tags": {"t1": {"value": 127, "type": "tinYint"}}}, {"metric": f'{stb_name}_2', "timestamp": {"value": 1626006833639000001, "type": "nS"}, "value": {"value": 32767, "type": "smallInt"}, "tags": {"t1": {"value": 32767, "type": "smallInt"}}}, @@ -1426,7 +1426,7 @@ class TDTestCase: def tbnameTagsColsNameCheckCase(self): tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = {'metric': 'rFa$sta', 'timestamp': {'value': 1626006834, 'type': 's'}, 'value': {'value': True, 'type': 'bool'}, 'tags': {'Tt!0': {'value': False, 'type': 'bool'}, 'tT@1': {'value': 127, 'type': 'tinyint'}, 't@2': {'value': 32767, 'type': 'smallint'}, 't$3': {'value': 2147483647, 'type': 'int'}, 't%4': {'value': 9223372036854775807, 'type': 'bigint'}, 't^5': {'value': 11.12345027923584, 'type': 'float'}, 't&6': {'value': 22.123456789, 'type': 'double'}, 't*7': {'value': 'binaryTagValue', 'type': 'binary'}, 't!@#$%^&*()_+[];:<>?,9': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': 'rFas$ta_1'}} self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) query_sql = 'select * from `rFa$sta`' @@ -1441,7 +1441,7 @@ class TDTestCase: metric value "." trans to "_" """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genFullTypeJson(point_trans_tag=True, value_type=value_type)[0] self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) tdSql.execute("drop table `.point.trans.test`") @@ -1509,7 +1509,7 @@ class TDTestCase: thread input different stb """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json = self.genSqlList(value_type=value_type)[0] self.multiThreadRun(self.genMultiThreadSeq(input_json)) tdSql.query(f"show tables;") @@ -1520,7 +1520,7 @@ class TDTestCase: thread input same stb tb, different data, result keep first data """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) self.resCmp(input_json, stb_name) @@ -1538,7 +1538,7 @@ class TDTestCase: thread input same stb tb, different data, add columes and tags, result keep first data """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) self.resCmp(input_json, stb_name) @@ -1556,7 +1556,7 @@ class TDTestCase: thread input same stb tb, different data, minus columes and tags, result keep first data """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) self.resCmp(input_json, stb_name) @@ -1574,7 +1574,7 @@ class TDTestCase: thread input same stb, different tb, different data """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) self.resCmp(input_json, stb_name) s_stb_d_tb_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[4] @@ -1587,7 +1587,7 @@ class TDTestCase: thread input same stb, different tb, different data, add col, mul tag """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary")) self.resCmp(input_json, stb_name) s_stb_d_tb_m_tag_list = [({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "omfdhyom", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'), @@ -1605,7 +1605,7 @@ class TDTestCase: thread input same stb, different tb, different data, add tag, mul col """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) self.resCmp(input_json, stb_name) s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[6] @@ -1618,7 +1618,7 @@ class TDTestCase: thread input same stb tb, different ts """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary")) self.resCmp(input_json, stb_name) @@ -1638,7 +1638,7 @@ class TDTestCase: thread input same stb tb, different ts, add col, mul tag """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary")) self.resCmp(input_json, stb_name) @@ -1660,7 +1660,7 @@ class TDTestCase: thread input same stb tb, different ts, add tag, mul col """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary")) self.resCmp(input_json, stb_name) @@ -1683,7 +1683,7 @@ class TDTestCase: thread input same stb, different tb, data, ts """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) self.resCmp(input_json, stb_name) s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[10] @@ -1696,7 +1696,7 @@ class TDTestCase: thread input same stb, different tb, data, ts, add col, mul tag """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary")) self.resCmp(input_json, stb_name) s_stb_d_tb_d_ts_m_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'), diff --git a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py index 3c47a65746b89c96b77408b6c910c88a8703e147..209cfb724e460207493dc2ca1ab0dd3522eb333b 100644 --- a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py +++ b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py @@ -30,7 +30,7 @@ if platform.system().lower() == 'windows': class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) + tdSql.init(conn.cursor(), False) self._conn = conn self.smlChildTableName_value = "id" @@ -351,7 +351,7 @@ class TDTestCase: normal tags and cols, one for every elm """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(protocol=protocol) self.resCmp(input_sql, stb_name, protocol=protocol) @@ -360,7 +360,7 @@ class TDTestCase: check all normal type """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"] for t_type in full_type_list: input_sql, stb_name = self.genFullTypeSql(t0=t_type, protocol=protocol) @@ -375,7 +375,7 @@ class TDTestCase: binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"' ''' tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"' nchar_symbols = f'L{binary_symbols}' input_sql1, stb_name1 = self.genFullTypeSql(value=binary_symbols, t7=binary_symbols, t8=nchar_symbols, protocol=protocol) @@ -388,7 +388,7 @@ class TDTestCase: test ts list --> ["1626006833640ms", "1626006834s", "1626006822639022"] """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(ts=1626006833640) self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value) input_sql, stb_name = self.genFullTypeSql(ts=1626006833640) @@ -407,7 +407,7 @@ class TDTestCase: def openTstbTelnetTsCheckCase(self): tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 0 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' stb_name = input_sql.split(" ")[0] self.resCmp(input_sql, stb_name, ts=0) @@ -431,7 +431,7 @@ class TDTestCase: eg: t0=**,id=**,t1=** """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, protocol=protocol) self.resCmp(input_sql, stb_name, protocol=protocol) @@ -441,7 +441,7 @@ class TDTestCase: eg: id and ID """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True, protocol=protocol) self.resCmp(input_sql, stb_name, protocol=protocol) input_sql, stb_name = self.genFullTypeSql(id_mixul_tag=True, protocol=protocol) @@ -454,7 +454,7 @@ class TDTestCase: id not exist """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True, protocol=protocol) self.resCmp(input_sql, stb_name, protocol=protocol) query_sql = f"select tbname from {stb_name}" @@ -470,10 +470,10 @@ class TDTestCase: """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') for input_sql in [self.genLongSql(128)[0]]: - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) for input_sql in [self.genLongSql(129)[0]]: - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) raise Exception("should not reach here") @@ -486,7 +486,7 @@ class TDTestCase: mix "`~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?" """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") rstr = list("~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?") for i in rstr: input_sql, stb_name = self.genFullTypeSql(tb_name=f"\"aaa{i}bbb\"", protocol=protocol) @@ -498,7 +498,7 @@ class TDTestCase: id is start with num """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(tb_name="1aaabbb", protocol=protocol) self.resCmp(input_sql, stb_name, protocol=protocol) @@ -507,7 +507,7 @@ class TDTestCase: check now unsupported """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(ts="now")[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -520,7 +520,7 @@ class TDTestCase: check date format ts unsupported """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -533,7 +533,7 @@ class TDTestCase: check ts format like 16260068336390us19 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(ts="16260068336390us19")[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -551,7 +551,7 @@ class TDTestCase: tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') stb_name_192 = tdCom.getLongName(len=192, mode="letters") tb_name_192 = tdCom.getLongName(len=192, mode="letters") - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name_192, tb_name=tb_name_192) self.resCmp(input_sql, stb_name) tdSql.query(f'select * from {stb_name}') @@ -581,7 +581,7 @@ class TDTestCase: check tag name limit <= 62 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tag_name = tdCom.getLongName(61, "letters") tag_name = f'T{tag_name}' stb_name = tdCom.getLongName(7, "letters") @@ -599,7 +599,7 @@ class TDTestCase: check full type tag value limit """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # nchar # * legal nchar could not be larger than 16374/4 stb_name = tdCom.getLongName(7, "letters") @@ -618,12 +618,12 @@ class TDTestCase: check full type col value limit """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # i8 for value in ["-128i8", "127i8"]: input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in ["-129i8", "128i8"]: input_sql = self.genFullTypeSql(value=value)[0] try: @@ -632,11 +632,11 @@ class TDTestCase: except SchemalessError as err: tdSql.checkNotEqual(err.errno, 0) # i16 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in ["-32768i16"]: input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in ["-32769i16", "32768i16"]: input_sql = self.genFullTypeSql(value=value)[0] try: @@ -646,11 +646,11 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # i32 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in ["-2147483648i32"]: input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in ["-2147483649i32", "2147483648i32"]: input_sql = self.genFullTypeSql(value=value)[0] try: @@ -660,11 +660,11 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # i64 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in ["-9223372036854775808i64"]: input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in ["-9223372036854775809i64", "9223372036854775808i64"]: input_sql = self.genFullTypeSql(value=value)[0] try: @@ -674,12 +674,12 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # f32 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]: input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) # * limit set to 4028234664*(10**38) - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]: input_sql = self.genFullTypeSql(value=value)[0] try: @@ -689,12 +689,12 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # f64 - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") for value in [f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64', f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64']: input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) # # * limit set to 1.797693134862316*(10**308) - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # for value in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']: # input_sql = self.genFullTypeSql(value=value)[0] # try: @@ -704,12 +704,12 @@ class TDTestCase: # tdSql.checkNotEqual(err.errno, 0) # # # binary - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # stb_name = tdCom.getLongName(7, "letters") # input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16374, "letters")}" t0=t' # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16375, "letters")}" t0=t' # try: # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -719,12 +719,12 @@ class TDTestCase: # # nchar # # * legal nchar could not be larger than 16374/4 - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # stb_name = tdCom.getLongName(7, "letters") # input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4093, "letters")}" t0=t' # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) - # tdCom.cleanTb() + # tdCom.cleanTb(dbname="test") # input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4094, "letters")}" t0=t' # try: # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -738,7 +738,7 @@ class TDTestCase: test illegal tag col value """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # bool for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]: input_sql1, stb_name = self.genFullTypeSql(t0=i) @@ -774,7 +774,7 @@ class TDTestCase: check blank case ''' tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") # input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc aaa" t0=t', # f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"', # f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"', @@ -792,7 +792,7 @@ class TDTestCase: check duplicate Id Tag Col """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql_id = self.genFullTypeSql(id_double_tag=True)[0] try: self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.TELNET.value, None) @@ -815,7 +815,7 @@ class TDTestCase: case no id when stb exist """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", value="f") self.resCmp(input_sql, stb_name) input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", value="f") @@ -828,7 +828,7 @@ class TDTestCase: check duplicate insert when stb exist """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -840,7 +840,7 @@ class TDTestCase: check length increase """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) tb_name = tdCom.getLongName(5, "letters") @@ -858,7 +858,7 @@ class TDTestCase: * col is added with value when update==1 """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") for db_update_tag in [0, 1]: if db_update_tag == 1 : @@ -885,7 +885,7 @@ class TDTestCase: check tag count add """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f") self.resCmp(input_sql, stb_name) @@ -902,7 +902,7 @@ class TDTestCase: insert two table, keep tag unchange, change col """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(t0="f", value="f", id_noexist_tag=True) self.resCmp(input_sql, stb_name) tb_name1 = self.getNoIdTbName(stb_name) @@ -925,7 +925,7 @@ class TDTestCase: check nchar length limit """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(7, "letters") input_sql = f'{stb_name} 1626006833640 f t2={tdCom.getLongName(1, "letters")}' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -949,7 +949,7 @@ class TDTestCase: test batch insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(8, "letters") tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') @@ -976,7 +976,7 @@ class TDTestCase: test multi insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") sql_list = [] stb_name = tdCom.getLongName(8, "letters") tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 nchar(10))') @@ -992,7 +992,7 @@ class TDTestCase: test batch error insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(8, "letters") lines = ["st123456 1626006833640 3i 64 t1=3i64 t2=4f64 t3=\"t3\"", f"{stb_name} 1626056811823316532ns tRue t2=5f64 t3=L\"ste\""] @@ -1007,7 +1007,7 @@ class TDTestCase: test multi cols insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(c_multi_tag=True)[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -1020,7 +1020,7 @@ class TDTestCase: test blank col insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(c_blank_tag=True)[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -1033,7 +1033,7 @@ class TDTestCase: test blank tag insert """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(t_blank_tag=True)[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -1046,7 +1046,7 @@ class TDTestCase: check nchar ---> chinese """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(chinese_tag=True) self.resCmp(input_sql, stb_name) @@ -1055,7 +1055,7 @@ class TDTestCase: multi_field ''' tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(multi_field_tag=True)[0] try: self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -1065,7 +1065,7 @@ class TDTestCase: def spellCheckCase(self): tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(8, "letters") input_sql_list = [f'{stb_name}_1 1626006833640 127I8 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', f'{stb_name}_2 1626006833640 32767I16 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', @@ -1086,7 +1086,7 @@ class TDTestCase: metric value "." trans to "_" """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(point_trans_tag=True, protocol=protocol)[0] if protocol == 'telnet-tcp': stb_name = f'`{input_sql.split(" ")[1]}`' @@ -1097,7 +1097,7 @@ class TDTestCase: def defaultTypeCheckCase(self): tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") stb_name = tdCom.getLongName(8, "letters") input_sql_list = [f'{stb_name}_1 1626006833640 9223372036854775807 t0=f t1=127 t2=32767i16 t3=2147483647i32 t4=9223372036854775807 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \ f'{stb_name}_2 1626006833641 22.123456789 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789 t7="vozamcts" t8=L"ncharTagValue"', \ @@ -1110,7 +1110,7 @@ class TDTestCase: def tbnameTagsColsNameCheckCase(self): tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") if self.smlChildTableName_value == "ID": input_sql = 'rFa$sta 1626006834 9223372036854775807 id=rFas$ta_1 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"' self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) @@ -1135,7 +1135,7 @@ class TDTestCase: stb = "put" """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genFullTypeSql(tcp_keyword_tag=True, protocol=protocol)[0] stb_name = f'`{input_sql.split(" ")[1]}`' self.resCmp(input_sql, stb_name, protocol=protocol) @@ -1204,7 +1204,7 @@ class TDTestCase: thread input different stb """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql = self.genSqlList()[0] print(input_sql) self.multiThreadRun(self.genMultiThreadSeq(input_sql)) @@ -1216,7 +1216,7 @@ class TDTestCase: thread input same stb tb, different data, result keep first data """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) @@ -1235,7 +1235,7 @@ class TDTestCase: thread input same stb tb, different data, add columes and tags, result keep first data """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) @@ -1254,7 +1254,7 @@ class TDTestCase: thread input same stb tb, different data, minus columes and tags, result keep first data """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) @@ -1273,7 +1273,7 @@ class TDTestCase: thread input same stb, different tb, different data """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4] @@ -1286,7 +1286,7 @@ class TDTestCase: thread input same stb, different tb, different data, add col, mul tag """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) s_stb_d_tb_m_tag_list = [(f'{stb_name} 1626006833640 "omfdhyom" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ @@ -1303,7 +1303,7 @@ class TDTestCase: thread input same stb, different tb, different data, add tag, mul col """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name)[6] @@ -1316,7 +1316,7 @@ class TDTestCase: thread input same stb tb, different ts """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) @@ -1336,7 +1336,7 @@ class TDTestCase: thread input same stb tb, different ts, add col, mul tag """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) @@ -1354,7 +1354,7 @@ class TDTestCase: thread input same stb tb, different ts, add tag, mul col """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) @@ -1377,7 +1377,7 @@ class TDTestCase: thread input same stb, different tb, data, ts """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10] @@ -1390,7 +1390,7 @@ class TDTestCase: thread input same stb, different tb, data, ts, add col, mul tag """ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') - tdCom.cleanTb() + tdCom.cleanTb(dbname="test") input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) s_stb_d_tb_d_ts_m_tag_list = [(f'{stb_name} 0 "mnpmtzul" t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ diff --git a/tests/system-test/1-insert/time_range_wise.py b/tests/system-test/1-insert/time_range_wise.py index 8f61da221d5484765c6b03f1431bc7ce264d4d4a..c31d8d25475e24107c7319b86ad01112fbe93272 100644 --- a/tests/system-test/1-insert/time_range_wise.py +++ b/tests/system-test/1-insert/time_range_wise.py @@ -293,7 +293,7 @@ class TDTestCase: dbname = tdSql.getData(0,0) tdSql.query("select * from information_schema.ins_databases") for index , value in enumerate(tdSql.cursor.description): - if value[0] == "retention": + if value[0] == "retentions": r_index = index break for row in tdSql.queryResult: diff --git a/tests/system-test/2-query/avg.py b/tests/system-test/2-query/avg.py index 2afcc29ac892e9cd184a41a139209e1a3bcf2b54..884b8c087c81b84686ed53c9600862efceac342c 100644 --- a/tests/system-test/2-query/avg.py +++ b/tests/system-test/2-query/avg.py @@ -361,7 +361,7 @@ class TDTestCase: tdSql.error( f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - self.check_avg(f"select avg(c1), avg(c2), avg(c3) , avg(c4), avg(c5) ,avg(c6) from {dbname}.sub1_bound " , f" select sum(c1)/count(c1), sum(c2)/count(c2) ,sum(c3)/count(c3), sum(c4)/count(c4), sum(c5)/count(c5) ,sum(c6)/count(c6) from {dbname}.sub1_bound ") + #self.check_avg(f"select avg(c1), avg(c2), avg(c3) , avg(c4), avg(c5) ,avg(c6) from {dbname}.sub1_bound " , f" select sum(c1)/count(c1), sum(c2)/count(c2) ,sum(c3)/count(c3), sum(c4)/count(c4), sum(c5)/count(c5) ,sum(c6)/count(c6) from {dbname}.sub1_bound ") # check basic elem for table per row @@ -372,7 +372,7 @@ class TDTestCase: tdSql.checkData(0,2,14042.142857143) tdSql.checkData(0,3,53.571428571) tdSql.checkData(0,4,5.828571332045761e+37) - # tdSql.checkData(0,5,None) + tdSql.checkData(0,5,None) # check + - * / in functions @@ -382,7 +382,7 @@ class TDTestCase: tdSql.checkData(0,2,14042.142857143) tdSql.checkData(0,3,26.785714286) tdSql.checkData(0,4,2.9142856660228804e+37) - # tdSql.checkData(0,5,None) + tdSql.checkData(0,5,None) diff --git a/tests/system-test/2-query/function_diff.py b/tests/system-test/2-query/function_diff.py index fd5d6ea1cf1d42623443cbe13eb60aac6b9e80ac..946453bb23137d6ebbb67f8d588a67e054f5c2f1 100644 --- a/tests/system-test/2-query/function_diff.py +++ b/tests/system-test/2-query/function_diff.py @@ -193,43 +193,38 @@ class TDTestCase: # case17: only support normal table join case17 = { - "col": "t1.c1", - "table_expr": "t1, t2", - "condition": "where t1.ts=t2.ts" + "col": "table1.c1 ", + "table_expr": "db.t1 as table1, db.t2 as table2", + "condition": "where table1.ts=table2.ts" } self.checkdiff(**case17) - # case18~19: with group by - # case18 = { - # "table_expr": "db.t1", - # "condition": "group by c6" - # } - # self.checkdiff(**case18) + # case18~19: with group by , function diff not support group by + case19 = { - "table_expr": "db.stb1", + "table_expr": "db.stb1 where tbname =='t0' ", "condition": "partition by tbname order by tbname" # partition by tbname } self.checkdiff(**case19) - # # case20~21: with order by - # case20 = {"condition": "order by ts"} - # self.checkdiff(**case20) + # case20~21: with order by , Not a single-group group function - # # case22: with union + # case22: with union # case22 = { - # "condition": "union all select diff(c1) from t2" + # "condition": "union all select diff(c1) from db.t2 " # } # self.checkdiff(**case22) + tdSql.query("select count(c1) from db.t1 union all select count(c1) from db.t2") # case23: with limit/slimit case23 = { "condition": "limit 1" } self.checkdiff(**case23) - # case24 = { - # "table_expr": "db.stb1", - # "condition": "group by tbname slimit 1 soffset 1" - # } - # self.checkdiff(**case24) + case24 = { + "table_expr": "db.stb1", + "condition": "partition by tbname order by tbname slimit 1 soffset 1" + } + self.checkdiff(**case24) pass @@ -284,9 +279,9 @@ class TDTestCase: tdSql.query(self.diff_query_form(alias=", c2")) # mix with other 1 # tdSql.error(self.diff_query_form(table_expr="db.stb1")) # select stb directly stb_join = { - "col": "stb1.c1", - "table_expr": "stb1, stb2", - "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + "col": "stable1.c1", + "table_expr": "db.stb1 as stable1, db.stb2 as stable2", + "condition": "where stable1.ts=stable2.ts and stable1.st1=stable2.st2 order by stable1.ts" } tdSql.query(self.diff_query_form(**stb_join)) # stb join interval_sql = { @@ -315,20 +310,20 @@ class TDTestCase: for i in range(tbnum): for j in range(data_row): tdSql.execute( - f"insert into t{i} values (" + f"insert into db.t{i} values (" f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" ) tdSql.execute( - f"insert into t{i} values (" + f"insert into db.t{i} values (" f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" ) tdSql.execute( - f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + f"insert into db.tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" ) pass @@ -349,8 +344,8 @@ class TDTestCase: "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" ) for i in range(tbnum): - tdSql.execute(f"create table t{i} using db.stb1 tags({i})") - tdSql.execute(f"create table tt{i} using db.stb2 tags({i})") + tdSql.execute(f"create table db.t{i} using db.stb1 tags({i})") + tdSql.execute(f"create table db.tt{i} using db.stb2 tags({i})") pass def diff_support_stable(self): @@ -398,8 +393,8 @@ class TDTestCase: tdLog.printNoPrefix("######## insert only NULL test:") for i in range(tbnum): - tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") - tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime + 5})") self.diff_current_query() self.diff_error_query() @@ -430,9 +425,9 @@ class TDTestCase: tdLog.printNoPrefix("######## insert data mix with NULL test:") for i in range(tbnum): - tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") - tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") - tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") self.diff_current_query() self.diff_error_query() diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 9348a8ca8f520e3ee8ea7f1c56f211dd53cd7dd4..5550519e05249de13d1267dd2a8f5bc1b10fae6d 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -551,6 +551,79 @@ class TDTestCase: tdSql.checkData(0, 0, 15) tdSql.checkData(1, 0, 15) + tdLog.printNoPrefix("==========step9:test multi-interp cases") + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(null)") + tdSql.checkRows(5) + tdSql.checkCols(4) + + for i in range (tdSql.queryCols): + tdSql.checkData(0, i, None) + tdSql.checkData(1, i, None) + tdSql.checkData(2, i, 15) + tdSql.checkData(3, i, None) + tdSql.checkData(4, i, None) + + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(value, 1)") + tdSql.checkRows(5) + tdSql.checkCols(4) + + for i in range (tdSql.queryCols): + tdSql.checkData(0, i, 1) + tdSql.checkData(1, i, 1) + tdSql.checkData(2, i, 15) + tdSql.checkData(3, i, 1) + tdSql.checkData(4, i, 1) + + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(prev)") + tdSql.checkRows(5) + tdSql.checkCols(4) + + for i in range (tdSql.queryCols): + tdSql.checkData(0, i, 5) + tdSql.checkData(1, i, 5) + tdSql.checkData(2, i, 15) + tdSql.checkData(3, i, 15) + tdSql.checkData(4, i, 15) + + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(next)") + tdSql.checkRows(3) + tdSql.checkCols(4) + + for i in range (tdSql.queryCols): + tdSql.checkData(0, i, 15) + tdSql.checkData(1, i, 15) + tdSql.checkData(2, i, 15) + + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(linear)") + tdSql.checkRows(1) + tdSql.checkCols(4) + + for i in range (tdSql.queryCols): + tdSql.checkData(0, i, 15) + + tdLog.printNoPrefix("==========step10:test error cases") + + tdSql.error(f"select interp(c0) from {dbname}.{tbname}") + tdSql.error(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05')") + tdSql.error(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d)") + tdSql.error(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') fill(null)") + tdSql.error(f"select interp(c0) from {dbname}.{tbname} every(1s) fill(null)") + tdSql.error(f"select interp(c0) from {dbname}.{tbname} where ts >= '2020-02-10 00:00:05' and ts <= '2020-02-15 00:00:05' every(1s) fill(null)") + + # input can only be numerical types + tdSql.error(f"select interp(ts) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + tdSql.error(f"select interp(c6) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + tdSql.error(f"select interp(c7) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + tdSql.error(f"select interp(c8) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + + # input can only be columns + tdSql.error(f"select interp(1) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + tdSql.error(f"select interp(1.5) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + tdSql.error(f"select interp(true) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + tdSql.error(f"select interp(false) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + tdSql.error(f"select interp('abcd') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + tdSql.error(f"select interp('中文字符') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/2-query/irate.py b/tests/system-test/2-query/irate.py index a64e7695c3e3fa7b27e9288eeef59f1de838db51..408f4b374950be29c0f6df06593fb3b216ff409f 100644 --- a/tests/system-test/2-query/irate.py +++ b/tests/system-test/2-query/irate.py @@ -69,7 +69,7 @@ class TDTestCase: comput_irate_value = origin_result[1][0]*1000/( origin_result[1][-1] - origin_result[0][-1]) else: comput_irate_value = (origin_result[1][0] - origin_result[0][0])*1000/( origin_result[1][-1] - origin_result[0][-1]) - if abs(comput_irate_value - irate_value) <= 0.0000001: + if abs(comput_irate_value - irate_value) <= 0.001: # set as 0.001 avoid floating point precision calculation errors tdLog.info(" irate work as expected , sql is %s "% irate_sql) else: tdLog.exit(" irate work not as expected , sql is %s "% irate_sql) diff --git a/tests/system-test/2-query/join2.py b/tests/system-test/2-query/join2.py index 5533cb840e29d2e0b109687f2aa3189d2c26a381..5c8fe0f0f96e6c28aa1ef70240b3ef4d5b0598fa 100644 --- a/tests/system-test/2-query/join2.py +++ b/tests/system-test/2-query/join2.py @@ -52,12 +52,12 @@ class TDTestCase: return query_condition - def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False): + def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False, alias_tb1="tb1", alias_tb2="tb2"): table_reference = tb_list[0] join_condition = table_reference join = "inner join" if INNER else "join" for i in range(len(tb_list[1:])): - join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}" + join_condition += f" as {alias_tb1} {join} {tb_list[i+1]} as {alias_tb2} on {alias_tb1}.{filter}={alias_tb2}.{filter}" return join_condition @@ -123,28 +123,28 @@ class TDTestCase: sqls = [] __join_tblist = self.__join_tblist for join_tblist in __join_tblist: - for join_tb in join_tblist: - select_claus_list = self.__query_condition(join_tb) - for select_claus in select_claus_list: - group_claus = self.__group_condition( col=select_claus) - where_claus = self.__where_condition( query_conditon=select_claus ) - having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" ) - sqls.extend( - ( - # self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus), - self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus), - self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus), - # self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus), - self.__gen_sql(select_claus, self.__join_condition(join_tblist), having_claus), - self.__gen_sql(select_claus, self.__join_condition(join_tblist)), - # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus), - self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus), - self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, ), - self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), having_claus ), - # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ), - self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True) ), - ) + alias_tb = "tb1" + select_claus_list = self.__query_condition(alias_tb) + for select_claus in select_claus_list: + group_claus = self.__group_condition( col=select_claus) + where_claus = self.__where_condition( query_conditon=select_claus ) + having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" ) + sqls.extend( + ( + # self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), where_claus, having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), where_claus), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb)), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True), where_claus, having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True), where_claus, ), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True), having_claus ), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True) ), ) + ) return list(filter(None, sqls)) def __join_check(self,): @@ -341,10 +341,8 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) + tdSql.execute(f"flush database db") - tdSql.execute("use db") tdLog.printNoPrefix("==========step4:after wal, all check again ") self.all_test() diff --git a/tests/system-test/2-query/json_tag.py b/tests/system-test/2-query/json_tag.py index d9d7ef23003bd82cca04e35a5e02aa535da409c7..d9715579aed4878c1cf17642824718d412a77511 100644 --- a/tests/system-test/2-query/json_tag.py +++ b/tests/system-test/2-query/json_tag.py @@ -1,25 +1,8 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, db_test.stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - # -*- coding: utf-8 -*- -import imp -import sys -import taos from util.log import tdLog from util.cases import tdCases from util.sql import tdSql -import json -import os - class TDTestCase: def caseDescription(self): @@ -31,35 +14,33 @@ class TDTestCase: return def init(self, conn, logSql): - self.testcasePath = os.path.split(__file__)[0] - self.testcaseFilename = os.path.split(__file__)[-1] - # os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), True) + tdSql.init(conn.cursor(), False) def run(self): # tdSql.prepare() - tdSql.execute('drop database if exists db') - tdSql.execute('create database db vgroups 1') - tdSql.execute('use db') + dbname = "db" + tdSql.execute(f'drop database if exists {dbname}') + tdSql.execute(f'create database {dbname} vgroups 1') + tdSql.execute(f'use {dbname}') print("============== STEP 1 ===== prepare data & validate json string") - tdSql.error("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json, tagint int)") - tdSql.error("create table if not exists jsons1(ts timestamp, data json) tags(tagint int)") - tdSql.execute("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)") - tdSql.execute("insert into jsons1_1 using jsons1 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json')") - tdSql.execute("insert into jsons1_2 using jsons1 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060628000, 2, true, 'json2', 'sss')") - tdSql.execute("insert into jsons1_3 using jsons1 tags('{\"tag1\":false,\"tag2\":\"beijing\"}') values (1591060668000, 3, false, 'json3', 'efwe')") - tdSql.execute("insert into jsons1_4 using jsons1 tags('{\"tag1\":null,\"tag2\":\"shanghai\",\"tag3\":\"hello\"}') values (1591060728000, 4, true, 'json4', '323sd')") - tdSql.execute("insert into jsons1_5 using jsons1 tags('{\"tag1\":1.232, \"tag2\":null}') values(1591060928000, 1, false, '你就会', 'ewe')") - tdSql.execute("insert into jsons1_6 using jsons1 tags('{\"tag1\":11,\"tag2\":\"\",\"tag2\":null}') values(1591061628000, 11, false, '你就会','')") - tdSql.execute("insert into jsons1_7 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')") + tdSql.error(f"create table if not exists {dbname}.jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json, tagint int)") + tdSql.error(f"create table if not exists {dbname}.jsons1(ts timestamp, data json) tags(tagint int)") + tdSql.execute(f"create table if not exists {dbname}.jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)") + tdSql.execute(f"insert into {dbname}.jsons1_1 using {dbname}.jsons1 tags('{{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}}') values(1591060618000, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json')") + tdSql.execute(f"insert into {dbname}.jsons1_2 using {dbname}.jsons1 tags('{{\"tag1\":5,\"tag2\":\"beijing\"}}') values (1591060628000, 2, true, 'json2', 'sss')") + tdSql.execute(f"insert into {dbname}.jsons1_3 using {dbname}.jsons1 tags('{{\"tag1\":false,\"tag2\":\"beijing\"}}') values (1591060668000, 3, false, 'json3', 'efwe')") + tdSql.execute(f"insert into {dbname}.jsons1_4 using {dbname}.jsons1 tags('{{\"tag1\":null,\"tag2\":\"shanghai\",\"tag3\":\"hello\"}}') values (1591060728000, 4, true, 'json4', '323sd')") + tdSql.execute(f"insert into {dbname}.jsons1_5 using {dbname}.jsons1 tags('{{\"tag1\":1.232, \"tag2\":null}}') values(1591060928000, 1, false, '你就会', 'ewe')") + tdSql.execute(f"insert into {dbname}.jsons1_6 using {dbname}.jsons1 tags('{{\"tag1\":11,\"tag2\":\"\",\"tag2\":null}}') values(1591061628000, 11, false, '你就会','')") + tdSql.execute(f"insert into {dbname}.jsons1_7 using {dbname}.jsons1 tags('{{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}}') values(1591062628000, 2, NULL, '你就会', 'dws')") # test duplicate key using the first one. elimate empty key - tdSql.execute("CREATE TABLE if not exists jsons1_8 using jsons1 tags('{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90, \"\":32}')") - tdSql.query("select jtag from jsons1_8") - tdSql.checkRows(0); + tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_8 using {dbname}.jsons1 tags('{{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90, \"\":32}}')") + tdSql.query(f"select jtag from {dbname}.jsons1_8") + tdSql.checkRows(0) - tdSql.query("select ts,jtag from jsons1 order by ts limit 2,3") + tdSql.query(f"select ts,jtag from {dbname}.jsons1 order by ts limit 2,3") tdSql.checkData(0, 0, '2020-06-02 09:17:08.000') tdSql.checkData(0, 1, '{"tag1":5,"tag2":"beijing"}') tdSql.checkData(1, 0, '2020-06-02 09:17:48.000') @@ -67,7 +48,7 @@ class TDTestCase: tdSql.checkData(2, 0, '2020-06-02 09:18:48.000') tdSql.checkData(2, 1, '{"tag1":null,"tag2":"shanghai","tag3":"hello"}') - tdSql.query("select ts,jtag->'tag1' from jsons1 order by ts limit 2,3") + tdSql.query(f"select ts,jtag->'tag1' from {dbname}.jsons1 order by ts limit 2,3") tdSql.checkData(0, 0, '2020-06-02 09:17:08.000') tdSql.checkData(0, 1, '5.000000000') tdSql.checkData(1, 0, '2020-06-02 09:17:48.000') @@ -76,163 +57,163 @@ class TDTestCase: tdSql.checkData(2, 1, 'null') # test empty json string, save as jtag is NULL - tdSql.execute("insert into jsons1_9 using jsons1 tags('\t') values (1591062328000, 24, NULL, '你就会', '2sdw')") - tdSql.execute("CREATE TABLE if not exists jsons1_10 using jsons1 tags('')") - tdSql.execute("CREATE TABLE if not exists jsons1_11 using jsons1 tags(' ')") - tdSql.execute("CREATE TABLE if not exists jsons1_12 using jsons1 tags('{}')") - tdSql.execute("CREATE TABLE if not exists jsons1_13 using jsons1 tags('null')") + tdSql.execute(f"insert into {dbname}.jsons1_9 using {dbname}.jsons1 tags('\t') values (1591062328000, 24, NULL, '你就会', '2sdw')") + tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_10 using {dbname}.jsons1 tags('')") + tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_11 using {dbname}.jsons1 tags(' ')") + tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_12 using {dbname}.jsons1 tags('{{}}')") + tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_13 using {dbname}.jsons1 tags('null')") # test invalidate json - tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('\"efwewf\"')") - tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('3333')") - tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags(76)") - tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags(hell)") - tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('33.33')") - tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('false')") - tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('[1,true]')") - tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{222}')") - tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"fe\"}')") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('\"efwewf\"')") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('3333')") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags(76)") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags(hell)") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('33.33')") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('false')") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('[1,true]')") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{222}}')") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"fe\"}}')") # test invalidate json key, key must can be printed assic char - tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":[1,true]}')") - tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":{}}')") - tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"。loc\":\"fff\"}')") - tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"\t\":\"fff\"}')") - tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"试试\":\"fff\"}')") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"tag1\":[1,true]}}')") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"tag1\":{{}}}}')") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"。loc\":\"fff\"}}')") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"\t\":\"fff\"}}')") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"试试\":\"fff\"}}')") # test invalidate json value, value number can not be inf,nan TD-12166 - tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"k\":1.8e308}')") - tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"k\":-1.8e308}')") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"k\":1.8e308}}')") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"k\":-1.8e308}}')") #test length limit char1= ''.join(['abcd']*64) char3= ''.join(['abcd']*1021) print(len(char3)) # 4084 - tdSql.error("CREATE TABLE if not exists jsons1_15 using jsons1 tags('{\"%s1\":5}')" % char1) # len(key)=257 - tdSql.execute("CREATE TABLE if not exists jsons1_15 using jsons1 tags('{\"%s\":5}')" % char1) # len(key)=256 - tdSql.error("CREATE TABLE if not exists jsons1_16 using jsons1 tags('{\"TSSSS\":\"%s\"}')" % char3) # len(object)=4096 - tdSql.execute("CREATE TABLE if not exists jsons1_16 using jsons1 tags('{\"TSSS\":\"%s\"}')" % char3) # len(object)=4095 - tdSql.execute("drop table if exists jsons1_15") - tdSql.execute("drop table if exists jsons1_16") + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_15 using {dbname}.jsons1 tags('{{\"%s1\":5}}')" % char1) # len(key)=257 + tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_15 using {dbname}.jsons1 tags('{{\"%s\":5}}')" % char1) # len(key)=256 + tdSql.error(f"create TABLE if not exists {dbname}.jsons1_16 using {dbname}.jsons1 tags('{{\"TSSSS\":\"%s\"}}')" % char3) # len(object)=4096 + tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_16 using {dbname}.jsons1 tags('{{\"TSSS\":\"%s\"}}')" % char3) # len(object)=4095 + tdSql.execute(f"drop table if exists {dbname}.jsons1_15") + tdSql.execute(f"drop table if exists {dbname}.jsons1_16") print("============== STEP 2 ===== alter table json tag") - tdSql.error("ALTER STABLE jsons1 add tag tag2 nchar(20)") - tdSql.error("ALTER STABLE jsons1 drop tag jtag") - tdSql.error("ALTER TABLE jsons1 MODIFY TAG jtag nchar(128)") + tdSql.error(f"ALTER stable {dbname}.jsons1 add tag tag2 nchar(20)") + tdSql.error(f"ALTER stable {dbname}.jsons1 drop tag jtag") + tdSql.error(f"ALTER table {dbname}.jsons1 MODIFY TAG jtag nchar(128)") - tdSql.execute("ALTER TABLE jsons1_1 SET TAG jtag='{\"tag1\":\"femail\",\"tag2\":35,\"tag3\":true}'") - tdSql.query("select jtag from jsons1_1") + tdSql.execute(f"ALTER table {dbname}.jsons1_1 SET TAG jtag='{{\"tag1\":\"femail\",\"tag2\":35,\"tag3\":true}}'") + tdSql.query(f"select jtag from {dbname}.jsons1_1") tdSql.checkData(0, 0, '{"tag1":"femail","tag2":35,"tag3":true}') - tdSql.execute("ALTER TABLE jsons1 rename TAG jtag jtag_new") - tdSql.execute("ALTER TABLE jsons1 rename TAG jtag_new jtag") + tdSql.execute(f"ALTER table {dbname}.jsons1 rename TAG jtag jtag_new") + tdSql.execute(f"ALTER table {dbname}.jsons1 rename TAG jtag_new jtag") - tdSql.execute("create table st(ts timestamp, i int) tags(t int)") - tdSql.error("ALTER STABLE st add tag jtag json") - tdSql.error("ALTER STABLE st add column jtag json") + tdSql.execute(f"create table {dbname}.st(ts timestamp, i int) tags(t int)") + tdSql.error(f"ALTER stable {dbname}.st add tag jtag json") + tdSql.error(f"ALTER stable {dbname}.st add column jtag json") print("============== STEP 3 ===== query table") # test error syntax - tdSql.error("select * from jsons1 where jtag->tag1='beijing'") - tdSql.error("select -> from jsons1") - tdSql.error("select * from jsons1 where contains") - tdSql.error("select * from jsons1 where jtag->") - tdSql.error("select jtag->location from jsons1") - tdSql.error("select jtag contains location from jsons1") - tdSql.error("select * from jsons1 where jtag contains location") - tdSql.query("select * from jsons1 where jtag contains''") - tdSql.error("select * from jsons1 where jtag contains 'location'='beijing'") + tdSql.error(f"select * from {dbname}.jsons1 where jtag->tag1='beijing'") + tdSql.error(f"select -> from {dbname}.jsons1") + tdSql.error(f"select * from {dbname}.jsons1 where contains") + tdSql.error(f"select * from {dbname}.jsons1 where jtag->") + tdSql.error(f"select jtag->location from {dbname}.jsons1") + tdSql.error(f"select jtag contains location from {dbname}.jsons1") + tdSql.error(f"select * from {dbname}.jsons1 where jtag contains location") + tdSql.query(f"select * from {dbname}.jsons1 where jtag contains''") + tdSql.error(f"select * from {dbname}.jsons1 where jtag contains 'location'='beijing'") # test function error - tdSql.error("select avg(jtag->'tag1') from jsons1") - tdSql.error("select avg(jtag) from jsons1") - tdSql.error("select min(jtag->'tag1') from jsons1") - tdSql.error("select min(jtag) from jsons1") - tdSql.error("select ceil(jtag->'tag1') from jsons1") - tdSql.error("select ceil(jtag) from jsons1") + tdSql.error(f"select avg(jtag->'tag1') from {dbname}.jsons1") + tdSql.error(f"select avg(jtag) from {dbname}.jsons1") + tdSql.error(f"select min(jtag->'tag1') from {dbname}.jsons1") + tdSql.error(f"select min(jtag) from {dbname}.jsons1") + tdSql.error(f"select ceil(jtag->'tag1') from {dbname}.jsons1") + tdSql.error(f"select ceil(jtag) from {dbname}.jsons1") #test scalar operation - tdSql.query("select jtag contains 'tag1',jtag->'tag1' from jsons1 order by jtag->'tag1'") + tdSql.query(f"select jtag contains 'tag1',jtag->'tag1' from {dbname}.jsons1 order by jtag->'tag1'") tdSql.checkRows(9) - tdSql.query("select jtag->'tag1' like 'fe%',jtag->'tag1' from jsons1 order by jtag->'tag1'") + tdSql.query(f"select jtag->'tag1' like 'fe%',jtag->'tag1' from {dbname}.jsons1 order by jtag->'tag1'") tdSql.checkRows(9) - tdSql.query("select jtag->'tag1' not like 'fe%',jtag->'tag1' from jsons1 order by jtag->'tag1'") + tdSql.query(f"select jtag->'tag1' not like 'fe%',jtag->'tag1' from {dbname}.jsons1 order by jtag->'tag1'") tdSql.checkRows(9) - tdSql.query("select jtag->'tag1' match 'fe',jtag->'tag1' from jsons1 order by jtag->'tag1'") + tdSql.query(f"select jtag->'tag1' match 'fe',jtag->'tag1' from {dbname}.jsons1 order by jtag->'tag1'") tdSql.checkRows(9) - tdSql.query("select jtag->'tag1' nmatch 'fe',jtag->'tag1' from jsons1 order by jtag->'tag1'") + tdSql.query(f"select jtag->'tag1' nmatch 'fe',jtag->'tag1' from {dbname}.jsons1 order by jtag->'tag1'") tdSql.checkRows(9) - tdSql.query("select jtag->'tag1',jtag->'tag1'>='a' from jsons1 order by jtag->'tag1'") + tdSql.query(f"select jtag->'tag1',jtag->'tag1'>='a' from {dbname}.jsons1 order by jtag->'tag1'") tdSql.checkRows(9) # test select normal column - tdSql.query("select dataint from jsons1 order by dataint") + tdSql.query(f"select dataint from {dbname}.jsons1 order by dataint") tdSql.checkRows(9) tdSql.checkData(1, 0, 1) # test select json tag - tdSql.query("select * from jsons1") + tdSql.query(f"select * from {dbname}.jsons1") tdSql.checkRows(9) - tdSql.query("select jtag from jsons1") + tdSql.query(f"select jtag from {dbname}.jsons1") tdSql.checkRows(9) - tdSql.query("select * from jsons1 where jtag is null") + tdSql.query(f"select * from {dbname}.jsons1 where jtag is null") tdSql.checkRows(1) - tdSql.query("select * from jsons1 where jtag is not null") + tdSql.query(f"select * from {dbname}.jsons1 where jtag is not null") tdSql.checkRows(8) # test jtag is NULL - tdSql.query("select jtag from jsons1_9") + tdSql.query(f"select jtag from {dbname}.jsons1_9") tdSql.checkData(0, 0, None) # test select json tag->'key', value is string - tdSql.query("select jtag->'tag1' from jsons1_1") + tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1_1") tdSql.checkData(0, 0, '"femail"') - tdSql.query("select jtag->'tag2' from jsons1_6") + tdSql.query(f"select jtag->'tag2' from {dbname}.jsons1_6") tdSql.checkData(0, 0, '""') # test select json tag->'key', value is int - tdSql.query("select jtag->'tag2' from jsons1_1") + tdSql.query(f"select jtag->'tag2' from {dbname}.jsons1_1") tdSql.checkData(0, 0, "35.000000000") # test select json tag->'key', value is bool - tdSql.query("select jtag->'tag3' from jsons1_1") + tdSql.query(f"select jtag->'tag3' from {dbname}.jsons1_1") tdSql.checkData(0, 0, "true") # test select json tag->'key', value is null - tdSql.query("select jtag->'tag1' from jsons1_4") + tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1_4") tdSql.checkData(0, 0, "null") # test select json tag->'key', value is double - tdSql.query("select jtag->'tag1' from jsons1_5") + tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1_5") tdSql.checkData(0, 0, "1.232000000") # test select json tag->'key', key is not exist - tdSql.query("select jtag->'tag10' from jsons1_4") + tdSql.query(f"select jtag->'tag10' from {dbname}.jsons1_4") tdSql.checkData(0, 0, None) - tdSql.query("select jtag->'tag1' from jsons1") + tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1") tdSql.checkRows(9) # test header name - res = tdSql.getColNameList("select jtag->'tag1' from jsons1") + res = tdSql.getColNameList(f"select jtag->'tag1' from {dbname}.jsons1") cname_list = [] cname_list.append("jtag->'tag1'") tdSql.checkColNameList(res, cname_list) # test where with json tag - tdSql.query("select * from jsons1_1 where jtag is not null") - tdSql.query("select * from jsons1 where jtag='{\"tag1\":11,\"tag2\":\"\"}'") - tdSql.error("select * from jsons1 where jtag->'tag1'={}") + tdSql.query(f"select * from {dbname}.jsons1_1 where jtag is not null") + tdSql.error(f"select * from {dbname}.jsons1 where jtag='{{\"tag1\":11,\"tag2\":\"\"}}'") + tdSql.error(f"select * from {dbname}.jsons1 where jtag->'tag1'={{}}") # test json error - tdSql.error("select jtag + 1 from jsons1") - tdSql.error("select jtag > 1 from jsons1") - tdSql.error("select jtag like \"1\" from jsons1") - tdSql.error("select jtag in (\"1\") from jsons1") - #tdSql.error("select jtag from jsons1 where jtag > 1") - #tdSql.error("select jtag from jsons1 where jtag like 'fsss'") - #tdSql.error("select jtag from jsons1 where jtag in (1)") + tdSql.error(f"select jtag + 1 from {dbname}.jsons1") + tdSql.error(f"select jtag > 1 from {dbname}.jsons1") + tdSql.error(f"select jtag like \"1\" from {dbname}.jsons1") + tdSql.error(f"select jtag in (\"1\") from {dbname}.jsons1") + #tdSql.error(f"select jtag from {dbname}.jsons1 where jtag > 1") + #tdSql.error(f"select jtag from {dbname}.jsons1 where jtag like 'fsss'") + #tdSql.error(f"select jtag from {dbname}.jsons1 where jtag in (1)") # where json value is string - tdSql.query("select * from jsons1 where jtag->'tag2'='beijing'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'='beijing'") tdSql.checkRows(2) - tdSql.query("select dataint,tbname,jtag->'tag1',jtag from jsons1 where jtag->'tag2'='beijing' order by dataint") + tdSql.query(f"select dataint,tbname,jtag->'tag1',jtag from {dbname}.jsons1 where jtag->'tag2'='beijing' order by dataint") tdSql.checkRows(2) tdSql.checkData(0, 0, 2) tdSql.checkData(0, 1, 'jsons1_2') @@ -243,180 +224,180 @@ class TDTestCase: tdSql.checkData(1, 2, 'false') - tdSql.query("select * from jsons1 where jtag->'tag1'='beijing'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'='beijing'") tdSql.checkRows(0) - tdSql.query("select * from jsons1 where jtag->'tag1'='收到货'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'='收到货'") tdSql.checkRows(1) - tdSql.query("select * from jsons1 where jtag->'tag2'>'beijing'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'>'beijing'") tdSql.checkRows(1) - tdSql.query("select * from jsons1 where jtag->'tag2'>='beijing'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'>='beijing'") tdSql.checkRows(3) - tdSql.query("select * from jsons1 where jtag->'tag2'<'beijing'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'<'beijing'") tdSql.checkRows(2) - tdSql.query("select * from jsons1 where jtag->'tag2'<='beijing'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'<='beijing'") tdSql.checkRows(4) - tdSql.query("select * from jsons1 where jtag->'tag2'!='beijing'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'!='beijing'") tdSql.checkRows(3) - tdSql.query("select * from jsons1 where jtag->'tag2'=''") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'=''") tdSql.checkRows(2) # where json value is int - tdSql.query("select * from jsons1 where jtag->'tag1'=5") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=5") tdSql.checkRows(1) tdSql.checkData(0, 1, 2) - tdSql.query("select * from jsons1 where jtag->'tag1'=10") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=10") tdSql.checkRows(0) - tdSql.query("select * from jsons1 where jtag->'tag1'<54") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'<54") tdSql.checkRows(3) - tdSql.query("select * from jsons1 where jtag->'tag1'<=11") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'<=11") tdSql.checkRows(3) - tdSql.query("select * from jsons1 where jtag->'tag1'>4") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'>4") tdSql.checkRows(2) - tdSql.query("select * from jsons1 where jtag->'tag1'>=5") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'>=5") tdSql.checkRows(2) - tdSql.query("select * from jsons1 where jtag->'tag1'!=5") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'!=5") tdSql.checkRows(2) - tdSql.query("select * from jsons1 where jtag->'tag1'!=55") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'!=55") tdSql.checkRows(3) # where json value is double - tdSql.query("select * from jsons1 where jtag->'tag1'=1.232") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=1.232") tdSql.checkRows(1) - tdSql.query("select * from jsons1 where jtag->'tag1'<1.232") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'<1.232") tdSql.checkRows(0) - tdSql.query("select * from jsons1 where jtag->'tag1'<=1.232") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'<=1.232") tdSql.checkRows(1) - tdSql.query("select * from jsons1 where jtag->'tag1'>1.23") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'>1.23") tdSql.checkRows(3) - tdSql.query("select * from jsons1 where jtag->'tag1'>=1.232") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'>=1.232") tdSql.checkRows(3) - tdSql.query("select * from jsons1 where jtag->'tag1'!=1.232") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'!=1.232") tdSql.checkRows(2) - tdSql.query("select * from jsons1 where jtag->'tag1'!=3.232") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'!=3.232") tdSql.checkRows(3) - tdSql.query("select * from jsons1 where jtag->'tag1'/0=3") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'/0=3") tdSql.checkRows(0) - tdSql.query("select * from jsons1 where jtag->'tag1'/5=1") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'/5=1") tdSql.checkRows(1) # where json value is bool - tdSql.query("select * from jsons1 where jtag->'tag1'=true") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=true") tdSql.checkRows(0) - tdSql.query("select * from jsons1 where jtag->'tag1'=false") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=false") tdSql.checkRows(1) - tdSql.query("select * from jsons1 where jtag->'tag1'!=false") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'!=false") tdSql.checkRows(0) - tdSql.query("select * from jsons1 where jtag->'tag1'>false") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'>false") tdSql.checkRows(0) # where json value is null - tdSql.query("select * from jsons1 where jtag->'tag1'=null") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=null") tdSql.checkRows(0) # where json key is null - tdSql.query("select * from jsons1 where jtag->'tag_no_exist'=3") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag_no_exist'=3") tdSql.checkRows(0) # where json value is not exist - tdSql.query("select * from jsons1 where jtag->'tag1' is null") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' is null") tdSql.checkData(0, 0, 'jsons1_9') tdSql.checkRows(2) - tdSql.query("select * from jsons1 where jtag->'tag4' is null") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag4' is null") tdSql.checkRows(9) - tdSql.query("select * from jsons1 where jtag->'tag3' is not null") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag3' is not null") tdSql.checkRows(3) # test contains - tdSql.query("select * from jsons1 where jtag contains 'tag1'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag contains 'tag1'") tdSql.checkRows(8) - tdSql.query("select * from jsons1 where jtag contains 'tag3'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag contains 'tag3'") tdSql.checkRows(4) - tdSql.query("select * from jsons1 where jtag contains 'tag_no_exist'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag contains 'tag_no_exist'") tdSql.checkRows(0) # test json tag in where condition with and/or - tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='beijing'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=false and jtag->'tag2'='beijing'") tdSql.checkRows(1) - tdSql.query("select * from jsons1 where jtag->'tag1'=false or jtag->'tag2'='beijing'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=false or jtag->'tag2'='beijing'") tdSql.checkRows(2) - tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'") tdSql.checkRows(0) - tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'") tdSql.checkRows(0) - tdSql.query("select * from jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35") tdSql.checkRows(0) - tdSql.query("select * from jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35") tdSql.checkRows(0) - tdSql.query("select * from jsons1 where jtag->'tag1' is not null and jtag contains 'tag3'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' is not null and jtag contains 'tag3'") tdSql.checkRows(3) - tdSql.query("select * from jsons1 where jtag->'tag1'='femail' and jtag contains 'tag3'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'='femail' and jtag contains 'tag3'") tdSql.checkRows(2) # test with between and - tdSql.query("select * from jsons1 where jtag->'tag1' between 1 and 30") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' between 1 and 30") tdSql.checkRows(3) - tdSql.query("select * from jsons1 where jtag->'tag1' between 'femail' and 'beijing'") - tdSql.checkRows(2) + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' between 'femail' and 'beijing'") + tdSql.checkRows(0) # test with tbname/normal column - tdSql.query("select * from jsons1 where tbname = 'jsons1_1'") + tdSql.query(f"select * from {dbname}.jsons1 where tbname = 'jsons1_1'") tdSql.checkRows(2) - tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3'") + tdSql.query(f"select * from {dbname}.jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3'") tdSql.checkRows(2) - tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=3") + tdSql.query(f"select * from {dbname}.jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=3") tdSql.checkRows(0) - tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=23") + tdSql.query(f"select * from {dbname}.jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=23") tdSql.checkRows(1) # test where condition like - tdSql.query("select * from jsons1 where jtag->'tag2' like 'bei%'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2' like 'bei%'") tdSql.checkRows(2) - tdSql.query("select * from jsons1 where jtag->'tag1' like 'fe%' and jtag->'tag2' is not null") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' like 'fe%' and jtag->'tag2' is not null") tdSql.checkRows(2) # test where condition in no support in - tdSql.error("select * from jsons1 where jtag->'tag1' in ('beijing')") + tdSql.error(f"select * from {dbname}.jsons1 where jtag->'tag1' in ('beijing')") # test where condition match/nmath - tdSql.query("select * from jsons1 where jtag->'tag1' match 'ma'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' match 'ma'") tdSql.checkRows(2) - tdSql.query("select * from jsons1 where jtag->'tag1' match 'ma$'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' match 'ma$'") tdSql.checkRows(0) - tdSql.query("select * from jsons1 where jtag->'tag2' match 'jing$'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2' match 'jing$'") tdSql.checkRows(2) - tdSql.query("select * from jsons1 where jtag->'tag1' match '收到'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' match '收到'") tdSql.checkRows(1) - tdSql.query("select * from jsons1 where jtag->'tag1' nmatch 'ma'") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' nmatch 'ma'") tdSql.checkRows(1) # test distinct - tdSql.execute("insert into jsons1_14 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')") - tdSql.query("select distinct jtag->'tag1' from jsons1") + tdSql.execute(f"insert into {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}}') values(1591062628000, 2, NULL, '你就会', 'dws')") + tdSql.query(f"select distinct jtag->'tag1' from {dbname}.jsons1") tdSql.checkRows(8) - tdSql.error("select distinct jtag from jsons1") + tdSql.error(f"select distinct jtag from {dbname}.jsons1") #test dumplicate key with normal colomn - tdSql.execute("INSERT INTO jsons1_15 using jsons1 tags('{\"tbname\":\"tt\",\"databool\":true,\"datastr\":\"是是是\"}') values(1591060828000, 4, false, 'jjsf', \"你就会\")") - tdSql.query("select * from jsons1 where jtag->'datastr' match '是' and datastr match 'js'") + tdSql.execute(f"insert into {dbname}.jsons1_15 using {dbname}.jsons1 tags('{{\"tbname\":\"tt\",\"databool\":true,\"datastr\":\"是是是\"}}') values(1591060828000, 4, false, 'jjsf', \"你就会\")") + tdSql.query(f"select * from {dbname}.jsons1 where jtag->'datastr' match '是' and datastr match 'js'") tdSql.checkRows(1) - tdSql.query("select tbname,jtag->'tbname' from jsons1 where jtag->'tbname'='tt' and tbname='jsons1_15'") + tdSql.query(f"select tbname,jtag->'tbname' from {dbname}.jsons1 where jtag->'tbname'='tt' and tbname='jsons1_15'") tdSql.checkRows(1) # test join - tdSql.execute("create table if not exists jsons2(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)") - tdSql.execute("insert into jsons2_1 using jsons2 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 2, false, 'json2', '你是2')") - tdSql.execute("insert into jsons2_2 using jsons2 tags('{\"tag1\":5,\"tag2\":null}') values (1591060628000, 2, true, 'json2', 'sss')") - - tdSql.execute("create table if not exists jsons3(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)") - tdSql.execute("insert into jsons3_1 using jsons3 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 3, false, 'json3', '你是3')") - tdSql.execute("insert into jsons3_2 using jsons3 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060638000, 2, true, 'json3', 'sss')") - tdSql.query("select 'sss',33,a.jtag->'tag3' from jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'") + tdSql.execute(f"create table if not exists {dbname}.jsons2(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)") + tdSql.execute(f"insert into {dbname}.jsons2_1 using {dbname}.jsons2 tags('{{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}}') values(1591060618000, 2, false, 'json2', '你是2')") + tdSql.execute(f"insert into {dbname}.jsons2_2 using {dbname}.jsons2 tags('{{\"tag1\":5,\"tag2\":null}}') values (1591060628000, 2, true, 'json2', 'sss')") + + tdSql.execute(f"create table if not exists {dbname}.jsons3(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)") + tdSql.execute(f"insert into {dbname}.jsons3_1 using {dbname}.jsons3 tags('{{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}}') values(1591060618000, 3, false, 'json3', '你是3')") + tdSql.execute(f"insert into {dbname}.jsons3_2 using {dbname}.jsons3 tags('{{\"tag1\":5,\"tag2\":\"beijing\"}}') values (1591060638000, 2, true, 'json3', 'sss')") + tdSql.query(f"select 'sss',33,a.jtag->'tag3' from {dbname}.jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'") tdSql.checkData(0, 0, "sss") tdSql.checkData(0, 2, "true") - res = tdSql.getColNameList("select 'sss',33,a.jtag->'tag3' from jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'") + res = tdSql.getColNameList(f"select 'sss',33,a.jtag->'tag3' from {dbname}.jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'") cname_list = [] cname_list.append("'sss'") cname_list.append("33") @@ -424,19 +405,19 @@ class TDTestCase: tdSql.checkColNameList(res, cname_list) # # test group by & order by json tag - tdSql.query("select ts,jtag->'tag1' from jsons1 partition by jtag->'tag1' order by jtag->'tag1' desc") + tdSql.query(f"select ts,jtag->'tag1' from {dbname}.jsons1 partition by jtag->'tag1' order by jtag->'tag1' desc") tdSql.checkRows(11) tdSql.checkData(0, 1, '"femail"') tdSql.checkData(2, 1, '"收到货"') tdSql.checkData(7, 1, "false") - tdSql.error("select count(*) from jsons1 group by jtag") - tdSql.error("select count(*) from jsons1 partition by jtag") - tdSql.error("select count(*) from jsons1 group by jtag order by jtag") - tdSql.error("select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag2'") - tdSql.error("select count(*) from jsons1 group by jtag->'tag1' order by jtag") - tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1' desc") + tdSql.error(f"select count(*) from {dbname}.jsons1 group by jtag") + tdSql.error(f"select count(*) from {dbname}.jsons1 partition by jtag") + tdSql.error(f"select count(*) from {dbname}.jsons1 group by jtag order by jtag") + tdSql.error(f"select count(*) from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag2'") + tdSql.error(f"select count(*) from {dbname}.jsons1 group by jtag->'tag1' order by jtag") + tdSql.query(f"select count(*),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag1' desc") tdSql.checkRows(8) tdSql.checkData(0, 0, 2) tdSql.checkData(0, 1, '"femail"') @@ -447,7 +428,7 @@ class TDTestCase: tdSql.checkData(5, 0, 1) tdSql.checkData(5, 1, "false") - tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1' asc") + tdSql.query(f"select count(*),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag1' asc") tdSql.checkRows(8) tdSql.checkData(0, 1, None) tdSql.checkData(2, 0, 1) @@ -458,7 +439,7 @@ class TDTestCase: tdSql.checkData(7, 1, '"femail"') # test stddev with group by json tag - tdSql.query("select stddev(dataint),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'") + tdSql.query(f"select stddev(dataint),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag1'") tdSql.checkRows(8) tdSql.checkData(0, 1, None) tdSql.checkData(4, 0, 0) @@ -466,222 +447,222 @@ class TDTestCase: tdSql.checkData(7, 0, 11) tdSql.checkData(7, 1, '"femail"') - res = tdSql.getColNameList("select stddev(dataint),jsons1.jtag->'tag1' from jsons1 group by jsons1.jtag->'tag1' order by jtag->'tag1'") + res = tdSql.getColNameList(f"select stddev(dataint),jsons1.jtag->'tag1' from {dbname}.jsons1 group by jsons1.jtag->'tag1' order by jtag->'tag1'") cname_list = [] cname_list.append("stddev(dataint)") cname_list.append("jsons1.jtag->'tag1'") tdSql.checkColNameList(res, cname_list) # test top/bottom with group by json tag - tdSql.query("select top(dataint,2),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'") + tdSql.query(f"select top(dataint,2),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag1'") tdSql.checkRows(11) tdSql.checkData(0, 1, None) # test having - tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' having count(*) > 1") + tdSql.query(f"select count(*),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' having count(*) > 1") tdSql.checkRows(3) # subquery with json tag - tdSql.query("select * from (select jtag, dataint from jsons1) order by dataint") + tdSql.query(f"select * from (select jtag, dataint from {dbname}.jsons1) order by dataint") tdSql.checkRows(11) tdSql.checkData(1, 1, 1) tdSql.checkData(5, 0, '{"tag1":false,"tag2":"beijing"}') - tdSql.error("select jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)") - tdSql.error("select t->'tag1' from (select jtag->'tag1' as t, dataint from jsons1)") - tdSql.error("select ts,jtag->'tag1' from (select jtag->'tag1',tbname,ts from jsons1 order by ts)") + tdSql.error(f"select jtag->'tag1' from (select jtag->'tag1', dataint from {dbname}.jsons1)") + tdSql.error(f"select t->'tag1' from (select jtag->'tag1' as t, dataint from {dbname}.jsons1)") + tdSql.error(f"select ts,jtag->'tag1' from (select jtag->'tag1',tbname,ts from {dbname}.jsons1 order by ts)") # union all - tdSql.query("select jtag->'tag1' from jsons1 union all select jtag->'tag2' from jsons2") + tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1 union all select jtag->'tag2' from {dbname}.jsons2") tdSql.checkRows(13) - tdSql.query("select jtag->'tag1' from jsons1_1 union all select jtag->'tag2' from jsons2_1") + tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1_1 union all select jtag->'tag2' from {dbname}.jsons2_1") tdSql.checkRows(3) - tdSql.query("select jtag->'tag1' from jsons1_1 union all select jtag->'tag1' from jsons2_1") + tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1_1 union all select jtag->'tag1' from {dbname}.jsons2_1") tdSql.checkRows(3) - tdSql.query("select dataint,jtag->'tag1',tbname from jsons1 union all select dataint,jtag->'tag1',tbname from jsons2") + tdSql.query(f"select dataint,jtag->'tag1',tbname from {dbname}.jsons1 union all select dataint,jtag->'tag1',tbname from {dbname}.jsons2") tdSql.checkRows(13) - tdSql.query("select dataint,jtag,tbname from jsons1 union all select dataint,jtag,tbname from jsons2") + tdSql.query(f"select dataint,jtag,tbname from {dbname}.jsons1 union all select dataint,jtag,tbname from {dbname}.jsons2") tdSql.checkRows(13) #show create table - tdSql.query("show create table jsons1") + tdSql.query(f"show create table {dbname}.jsons1") tdSql.checkData(0, 1, 'CREATE STABLE `jsons1` (`ts` TIMESTAMP, `dataint` INT, `databool` BOOL, `datastr` NCHAR(50), `datastrbin` VARCHAR(150)) TAGS (`jtag` JSON)') #test aggregate function:count/avg/twa/irate/sum/stddev/leastsquares - tdSql.query("select count(*) from jsons1 where jtag is not null") + tdSql.query(f"select count(*) from {dbname}.jsons1 where jtag is not null") tdSql.checkData(0, 0, 10) - tdSql.query("select avg(dataint) from jsons1 where jtag is not null") + tdSql.query(f"select avg(dataint) from {dbname}.jsons1 where jtag is not null") tdSql.checkData(0, 0, 5.3) - # tdSql.query("select twa(dataint) from jsons1 where jtag is not null") + # tdSql.query(f"select twa(dataint) from {dbname}.jsons1 where jtag is not null") # tdSql.checkData(0, 0, 28.386363636363637) - # tdSql.query("select irate(dataint) from jsons1 where jtag is not null") + # tdSql.query(f"select irate(dataint) from {dbname}.jsons1 where jtag is not null") - tdSql.query("select sum(dataint) from jsons1 where jtag->'tag1' is not null") + tdSql.query(f"select sum(dataint) from {dbname}.jsons1 where jtag->'tag1' is not null") tdSql.checkData(0, 0, 45) - tdSql.query("select stddev(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.query(f"select stddev(dataint) from {dbname}.jsons1 where jtag->'tag1'>1") tdSql.checkData(0, 0, 4.496912521) - tdSql.query("SELECT LEASTSQUARES(dataint, 1, 1) from jsons1 where jtag is not null") + tdSql.query(f"select LEASTSQUARES(dataint, 1, 1) from {dbname}.jsons1 where jtag is not null") #test selection function:min/max/first/last/top/bottom/percentile/apercentile/last_row/interp - tdSql.query("select min(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.query(f"select min(dataint) from {dbname}.jsons1 where jtag->'tag1'>1") tdSql.checkData(0, 0, 1) - tdSql.query("select max(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.query(f"select max(dataint) from {dbname}.jsons1 where jtag->'tag1'>1") tdSql.checkData(0, 0, 11) - tdSql.query("select first(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.query(f"select first(dataint) from {dbname}.jsons1 where jtag->'tag1'>1") tdSql.checkData(0, 0, 2) - tdSql.query("select last(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.query(f"select last(dataint) from {dbname}.jsons1 where jtag->'tag1'>1") tdSql.checkData(0, 0, 11) - tdSql.query("select top(dataint,100) from jsons1 where jtag->'tag1'>1") + tdSql.query(f"select top(dataint,100) from {dbname}.jsons1 where jtag->'tag1'>1") tdSql.checkRows(3) - tdSql.query("select bottom(dataint,100) from jsons1 where jtag->'tag1'>1") + tdSql.query(f"select bottom(dataint,100) from {dbname}.jsons1 where jtag->'tag1'>1") tdSql.checkRows(3) - #tdSql.query("select percentile(dataint,20) from jsons1 where jtag->'tag1'>1") - tdSql.query("select apercentile(dataint, 50) from jsons1 where jtag->'tag1'>1") + #tdSql.query(f"select percentile(dataint,20) from {dbname}.jsons1 where jtag->'tag1'>1") + tdSql.query(f"select apercentile(dataint, 50) from {dbname}.jsons1 where jtag->'tag1'>1") tdSql.checkData(0, 0, 1.5) - # tdSql.query("select last_row(dataint) from jsons1 where jtag->'tag1'>1") - # tdSql.query("select interp(dataint) from jsons1 where ts = '2020-06-02 09:17:08.000' and jtag->'tag1'>1") + # tdSql.query(f"select last_row(dataint) from {dbname}.jsons1 where jtag->'tag1'>1") + # tdSql.query(f"select interp(dataint) from {dbname}.jsons1 where ts = '2020-06-02 09:17:08.000' and jtag->'tag1'>1") #test calculation function:diff/derivative/spread/ceil/floor/round/ - tdSql.query("select diff(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.query(f"select diff(dataint) from {dbname}.jsons1 where jtag->'tag1'>1") tdSql.checkRows(2) # tdSql.checkData(0, 0, -1) # tdSql.checkData(1, 0, 10) - tdSql.query("select derivative(dataint, 10m, 0) from jsons1 where jtag->'tag1'>1") + tdSql.query(f"select derivative(dataint, 10m, 0) from {dbname}.jsons1 where jtag->'tag1'>1") # tdSql.checkData(0, 0, -2) - tdSql.query("select spread(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.query(f"select spread(dataint) from {dbname}.jsons1 where jtag->'tag1'>1") tdSql.checkData(0, 0, 10) - tdSql.query("select ceil(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.query(f"select ceil(dataint) from {dbname}.jsons1 where jtag->'tag1'>1") tdSql.checkRows(3) - tdSql.query("select floor(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.query(f"select floor(dataint) from {dbname}.jsons1 where jtag->'tag1'>1") tdSql.checkRows(3) - tdSql.query("select round(dataint) from jsons1 where jtag->'tag1'>1") + tdSql.query(f"select round(dataint) from {dbname}.jsons1 where jtag->'tag1'>1") tdSql.checkRows(3) #math function - tdSql.query("select sin(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select sin(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select cos(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select cos(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select tan(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select tan(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select asin(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select asin(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select acos(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select acos(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select atan(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select atan(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select ceil(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select ceil(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select floor(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select floor(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select round(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select round(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select abs(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select abs(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select pow(dataint,5) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select pow(dataint,5) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select log(dataint,10) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select log(dataint,10) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select sqrt(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select sqrt(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select HISTOGRAM(dataint,'user_input','[1, 33, 555, 7777]',1) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select HISTOGRAM(dataint,'user_input','[1, 33, 555, 7777]',1) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select csum(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select csum(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select mavg(dataint,1) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select mavg(dataint,1) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select statecount(dataint,'GE',10) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select statecount(dataint,'GE',10) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select stateduration(dataint,'GE',0) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select stateduration(dataint,'GE',0) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select sample(dataint,3) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select sample(dataint,3) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select HYPERLOGLOG(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select HYPERLOGLOG(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(1) - tdSql.query("select twa(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select twa(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(1) # function not ready - tdSql.query("select tail(dataint,1) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select tail(dataint,1) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(1) - tdSql.query("select unique(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select unique(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select mode(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select mode(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(1) - tdSql.query("select irate(dataint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select irate(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(1) #str function - tdSql.query("select upper(dataStr) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select upper(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select ltrim(dataStr) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select ltrim(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select lower(dataStr) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select lower(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select rtrim(dataStr) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select rtrim(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select LENGTH(dataStr) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select LENGTH(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select CHAR_LENGTH(dataStr) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select CHAR_LENGTH(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select SUBSTR(dataStr,5) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select SUBSTR(dataStr,5) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select CONCAT(dataStr,dataStrBin) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select CONCAT(dataStr,dataStrBin) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select CONCAT_ws('adad!@!@%$^$%$^$%^a',dataStr,dataStrBin) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select CONCAT_ws('adad!@!@%$^$%$^$%^a',dataStr,dataStrBin) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select CAST(dataStr as bigint) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select CAST(dataStr as bigint) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) #time function - tdSql.query("select now() from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select now() from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select today() from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select today() from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select TIMEZONE() from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select TIMEZONE() from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select TO_ISO8601(ts) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select TO_ISO8601(ts) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select TO_UNIXTIMESTAMP(datastr) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select TO_UNIXTIMESTAMP(datastr) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select TIMETRUNCATE(ts,1s) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select TIMETRUNCATE(ts,1s) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select TIMEDIFF(ts,_c0) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select TIMEDIFF(ts,_c0) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select TIMEDIFF(ts,1u) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select TIMEDIFF(ts,1u) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(3) - tdSql.query("select ELAPSED(ts,1h) from jsons1 where jtag->'tag1'>1;") + tdSql.query(f"select ELAPSED(ts,1h) from {dbname}.jsons1 where jtag->'tag1'>1;") tdSql.checkRows(1) # to_json() - tdSql.query("select to_json('{\"abc\":123}') from jsons1_1") + tdSql.query(f"select to_json('{{\"abc\":123}}') from {dbname}.jsons1_1") tdSql.checkRows(2) tdSql.checkData(0, 0, '{"abc":123}') tdSql.checkData(1, 0, '{"abc":123}') - tdSql.query("select to_json('null') from jsons1_1") + tdSql.query(f"select to_json('null') from {dbname}.jsons1_1") tdSql.checkRows(2) tdSql.checkData(0, 0, 'null') tdSql.checkData(1, 0, 'null') - tdSql.query("select to_json('{\"key\"}') from jsons1_1") + tdSql.query(f"select to_json('{{\"key\"}}') from {dbname}.jsons1_1") tdSql.checkRows(2) tdSql.checkData(0, 0, 'null') tdSql.checkData(1, 0, 'null') #test TD-12077 - tdSql.execute("insert into jsons1_16 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":-2.111}') values(1591062628000, 2, NULL, '你就会', 'dws')") - tdSql.query("select jtag->'tag3' from jsons1_16") + tdSql.execute(f"insert into {dbname}.jsons1_16 using {dbname}.jsons1 tags('{{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":-2.111}}') values(1591062628000, 2, NULL, '你就会', 'dws')") + tdSql.query(f"select jtag->'tag3' from {dbname}.jsons1_16") tdSql.checkData(0, 0, '-2.111000000') # test TD-12452 - tdSql.execute("ALTER TABLE jsons1_1 SET TAG jtag=NULL") - tdSql.query("select jtag from jsons1_1") + tdSql.execute(f"ALTER table {dbname}.jsons1_1 SET TAG jtag=NULL") + tdSql.query(f"select jtag from {dbname}.jsons1_1") tdSql.checkData(0, 0, None) - tdSql.execute("CREATE TABLE if not exists jsons1_20 using jsons1 tags(NULL)") - tdSql.query("select jtag from jsons1_20") + tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_20 using {dbname}.jsons1 tags(NULL)") + tdSql.query(f"select jtag from {dbname}.jsons1_20") tdSql.checkRows(0) - tdSql.execute("insert into jsons1_21 using jsons1 tags(NULL) values(1591061628000, 11, false, '你就会','')") - tdSql.query("select jtag from jsons1_21") + tdSql.execute(f"insert into {dbname}.jsons1_21 using {dbname}.jsons1 tags(NULL) values(1591061628000, 11, false, '你就会','')") + tdSql.query(f"select jtag from {dbname}.jsons1_21") tdSql.checkData(0, 0, None) # # #test TD-12389 @@ -691,23 +672,23 @@ class TDTestCase: tdSql.checkData(5, 2, 4095) # # #test TD-13918 - tdSql.execute("drop table if exists jsons_13918_1") - tdSql.execute("drop table if exists jsons_13918_2") - tdSql.execute("drop table if exists jsons_13918_3") - tdSql.execute("drop table if exists jsons_13918_4") - tdSql.execute("drop table if exists jsons_stb") - tdSql.execute("create table jsons_stb (ts timestamp, dataInt int) tags (jtag json)") - tdSql.error("create table jsons_13918_1 using jsons_stb tags ('nullx')") - tdSql.error("create table jsons_13918_2 using jsons_stb tags (nullx)") - tdSql.error("insert into jsons_13918_3 using jsons_stb tags('NULLx') values(1591061628001, 11)") - tdSql.error("insert into jsons_13918_4 using jsons_stb tags(NULLx) values(1591061628002, 11)") - tdSql.execute("create table jsons_13918_1 using jsons_stb tags ('null')") - tdSql.execute("create table jsons_13918_2 using jsons_stb tags (null)") - tdSql.execute("insert into jsons_13918_1 values(1591061628003, 11)") - tdSql.execute("insert into jsons_13918_2 values(1591061628004, 11)") - tdSql.execute("insert into jsons_13918_3 using jsons_stb tags('NULL') values(1591061628005, 11)") - tdSql.execute("insert into jsons_13918_4 using jsons_stb tags(\"NULL\") values(1591061628006, 11)") - tdSql.query("select * from jsons_stb") + tdSql.execute(f"drop table if exists {dbname}.jsons_13918_1") + tdSql.execute(f"drop table if exists {dbname}.jsons_13918_2") + tdSql.execute(f"drop table if exists {dbname}.jsons_13918_3") + tdSql.execute(f"drop table if exists {dbname}.jsons_13918_4") + tdSql.execute(f"drop table if exists {dbname}.jsons_stb") + tdSql.execute(f"create table {dbname}.jsons_stb (ts timestamp, dataInt int) tags (jtag json)") + tdSql.error(f"create table {dbname}.jsons_13918_1 using {dbname}.jsons_stb tags ('nullx')") + tdSql.error(f"create table {dbname}.jsons_13918_2 using {dbname}.jsons_stb tags (nullx)") + tdSql.error(f"insert into {dbname}.jsons_13918_3 using {dbname}.jsons_stb tags('NULLx') values(1591061628001, 11)") + tdSql.error(f"insert into {dbname}.jsons_13918_4 using {dbname}.jsons_stb tags(NULLx) values(1591061628002, 11)") + tdSql.execute(f"create table {dbname}.jsons_13918_1 using {dbname}.jsons_stb tags ('null')") + tdSql.execute(f"create table {dbname}.jsons_13918_2 using {dbname}.jsons_stb tags (null)") + tdSql.execute(f"insert into {dbname}.jsons_13918_1 values(1591061628003, 11)") + tdSql.execute(f"insert into {dbname}.jsons_13918_2 values(1591061628004, 11)") + tdSql.execute(f"insert into {dbname}.jsons_13918_3 using {dbname}.jsons_stb tags('NULL') values(1591061628005, 11)") + tdSql.execute(f"insert into {dbname}.jsons_13918_4 using {dbname}.jsons_stb tags(\"NULL\") values(1591061628006, 11)") + tdSql.query(f"select * from {dbname}.jsons_stb") tdSql.checkRows(4) def stop(self): @@ -717,4 +698,3 @@ class TDTestCase: tdCases.addWindows(__file__, TDTestCase()) tdCases.addLinux(__file__, TDTestCase()) - diff --git a/tests/system-test/2-query/json_tag_large_tables.py b/tests/system-test/2-query/json_tag_large_tables.py index 5d7df6ceb87d940eca111324d3c92886c12442c4..9164c108f94ec9f538ba8d12ecc45be3b3b60e06 100644 --- a/tests/system-test/2-query/json_tag_large_tables.py +++ b/tests/system-test/2-query/json_tag_large_tables.py @@ -35,7 +35,7 @@ class TDTestCase: self.testcaseFilename = os.path.split(__file__)[-1] # os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), True) + tdSql.init(conn.cursor(), False) def run(self): # tdSql.prepare() @@ -47,24 +47,24 @@ class TDTestCase: i = 0 # add 100000 table tdSql.execute("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)") - while i <= 10 0000: + while i <= 100000: sql = """insert into jsons1_{%d} using jsons1 tags('{"tag1":{%d}}') values(1591060618000, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json')"""%(i, i) tdSql.execute(sql) i = i + 1 - // do query - i = 0 - while i <= 10 0000: + # do query + i = 0 + while i <= 100000: sql = """select count(*) from jsons1 where jtag->'tag1' = %d"""%(i) tdSql.query(sql) if 1 != tdSql.getRows(): print("err: %s"%(sql)) - - while i <= 10000000 + + while i <= 10000000: sql = """insert into jsons1_{%d} using jsons1 tags('{"tag1":{%d}}') values(1591060618000, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json')"""%(i, i) tdSql.execute(sql) i = i + 1 - + i = 0 # drop super table tdSql.execute("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)") @@ -76,7 +76,7 @@ class TDTestCase: tdSql.execute('drop stable jsons1') - # drop database + # drop database i = 0 tdSql.execute("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)") while i <= 100000: @@ -84,10 +84,10 @@ class TDTestCase: tdSql.execute(sql) i = i + 1 tdSql.execute('drop database db') - + # test duplicate key using the first one. elimate empty key - #tdSql.execute("CREATE TABLE if not exists jsons1_8 using jsons1 tags('{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90, \"\":32}')") tdSql.query("select jtag from jsons1_8") tdSql.checkRows(0); + #tdSql.execute("CREATE TABLE if not exists jsons1_8 using jsons1 tags('{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90, \"\":32}')") tdSql.query("select jtag from jsons1_8") tdSql.checkRows(0); #tdSql.query("select ts,jtag from jsons1 order by ts limit 2,3") #tdSql.checkData(0, 0, '2020-06-02 09:17:08.000') #tdSql.checkData(0, 1, '{"tag1":5,"tag2":"beijing"}') @@ -704,4 +704,3 @@ class TDTestCase: tdCases.addWindows(__file__, TDTestCase()) tdCases.addLinux(__file__, TDTestCase()) - diff --git a/tests/system-test/2-query/last_row.py b/tests/system-test/2-query/last_row.py index 105dc883c7f921119395c17d1488a2ec3c1869b7..5d435b068fb12959fd2bdc6f02968b2a7ffe7c9d 100644 --- a/tests/system-test/2-query/last_row.py +++ b/tests/system-test/2-query/last_row.py @@ -10,29 +10,26 @@ import random class TDTestCase: - updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143, - "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143, - "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143 ,"udf":0} def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), True) + tdSql.init(conn.cursor(), False) self.tb_nums = 10 self.row_nums = 20 self.ts = 1434938400000 self.time_step = 1000 - def insert_datas_and_check_abs(self ,tbnums , rownums , time_step ,cache_value ): - tdSql.execute("drop database if exists test ") - tdLog.info(" prepare datas for auto check abs function ") + def insert_datas_and_check_abs(self, tbnums, rownums, time_step, cache_value, dbname="test"): + tdSql.execute(f"drop database if exists {dbname} ") + tdLog.info("prepare datas for auto check abs function ") - tdSql.execute(f" create database test cachemodel {cache_value} ") - tdSql.execute(" use test ") - tdSql.execute(" create stable test.stb (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint,\ + tdSql.execute(f"create database {dbname} cachemodel {cache_value} ") + tdSql.execute(f"use {dbname} ") + tdSql.execute(f"create stable {dbname}.stb (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint,\ c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int)") for tbnum in range(tbnums): - tbname = "test.sub_tb_%d"%tbnum - tdSql.execute(" create table %s using stb tags(%d) "%(tbname , tbnum)) + tbname = f"{dbname}.sub_tb_{tbnum}" + tdSql.execute(f"create table {tbname} using {dbname}.stb tags({tbnum}) ") ts = self.ts for row in range(rownums): @@ -49,66 +46,65 @@ class TDTestCase: c10 = ts tdSql.execute(f" insert into {tbname} values ({ts},{c1},{c2},{c3},{c4},{c5},{c6},{c7},{c8},{c9},{c10})") - tdSql.execute("use test") tbnames = ["stb", "sub_tb_1"] support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"] for tbname in tbnames: - tdSql.query("desc {}".format(tbname)) + tdSql.query(f"desc {dbname}.{tbname}") coltypes = tdSql.queryResult for coltype in coltypes: colname = coltype[0] - abs_sql = "select abs({}) from {} order by tbname ".format(colname, 'test.'+tbname) - origin_sql = "select {} from {} order by tbname".format(colname, 'test.'+tbname) + abs_sql = f"select abs({colname}) from {dbname}.{tbname} order by tbname " + origin_sql = f"select {colname} from {dbname}.{tbname} order by tbname" if coltype[1] in support_types: self.check_result_auto(origin_sql , abs_sql) - def prepare_datas(self ,cache_value): - tdSql.execute("drop database if exists db ") - create_db_sql = f"create database if not exists db keep 3650 duration 1000 cachemodel {cache_value}" + def prepare_datas(self ,cache_value, dbname="db"): + tdSql.execute(f"drop database if exists {dbname} ") + create_db_sql = f"create database if not exists {dbname} keep 3650 duration 1000 cachemodel {cache_value}" tdSql.execute(create_db_sql) - tdSql.execute("use db") + tdSql.execute(f"use {dbname}") tdSql.execute( - '''create table db.stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) tdSql.execute( - ''' - create table db.t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table db.ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into db.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into db.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - "insert into db.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") tdSql.execute( - "insert into db.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") tdSql.execute( - "insert into db.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") tdSql.execute( - "insert into db.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") tdSql.execute( - "insert into db.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - "insert into db.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - "insert into db.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into db.t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -124,53 +120,53 @@ class TDTestCase: ''' ) - def prepare_tag_datas(self,cache_value): + def prepare_tag_datas(self,cache_value, dbname="testdb"): - tdSql.execute("drop database if exists testdb ") + tdSql.execute(f"drop database if exists {dbname} ") # prepare datas - tdSql.execute(f"create database if not exists testdb keep 3650 duration 1000 cachemodel {cache_value}") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 cachemodel {cache_value}") - tdSql.execute(" use testdb ") + tdSql.execute(f"use {dbname} ") - tdSql.execute(f" create stable testdb.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp , uc1 int unsigned,\ + tdSql.execute(f"create stable {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp , uc1 int unsigned,\ uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags( t1 int , t2 bigint , t3 smallint , t4 tinyint , t5 float , t6 double , t7 bool , t8 binary(36)\ , t9 nchar(36) , t10 int unsigned , t11 bigint unsigned ,t12 smallint unsigned , t13 tinyint unsigned ,t14 timestamp ) ") tdSql.execute( - ''' - create table testdb.t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): tdSql.execute( - f'create table testdb.ct{i+1} using stb1 tags ( {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" ,{111*i}, {1*i},{1*i},{1*i},now())') + f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" ,{111*i}, {1*i},{1*i},{1*i},now())') for i in range(9): tdSql.execute( - f"insert into testdb.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a ,{111*i},{1111*i},{i},{i} )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a ,{111*i},{1111*i},{i},{i} )" ) tdSql.execute( - f"insert into testdb.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a ,{111*i},{1111*i},{i},{i})" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a ,{111*i},{1111*i},{i},{i})" ) tdSql.execute( - "insert into testdb.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a ,0,0,0,0)") + f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a ,0,0,0,0)") tdSql.execute( - "insert into testdb.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a , 999 , 9999 , 9 , 9)") + f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a , 999 , 9999 , 9 , 9)") tdSql.execute( - "insert into testdb.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a , 999 , 99999 , 9 , 9)") + f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a , 999 , 99999 , 9 , 9)") tdSql.execute( - "insert into testdb.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a ,999 , 99999 , 9 , 9)") + f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a ,999 , 99999 , 9 , 9)") tdSql.execute( - "insert into testdb.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL) ") + f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL) ") tdSql.execute( - "insert into testdb.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL) ") + f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL) ") tdSql.execute( - "insert into testdb.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL ) ") + f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into testdb.t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -217,118 +213,116 @@ class TDTestCase: tdLog.info( "abs value check pass , it work as expected ,sql is \"%s\" " % abs_query) - def test_errors(self): - tdSql.execute("use testdb") - + def test_errors(self, dbname="testdb"): # bug need fix - tdSql.error("select last_row(c1 ,NULL) from testdb.t1") + tdSql.error(f"select last_row(c1 ,NULL) from {dbname}.t1") error_sql_lists = [ - "select last_row from testdb.t1", - "select last_row(-+--+c1) from testdb.t1", - "select last_row(123--123)==1 from testdb.t1", - "select last_row(c1) as 'd1' from testdb.t1", - #"select last_row(c1 ,NULL) from testdb.t1", - "select last_row(,) from testdb.t1;", - "select last_row(abs(c1) ab from testdb.t1)", - "select last_row(c1) as int from testdb.t1", - "select last_row from testdb.stb1", - "select last_row(123--123)==1 from testdb.stb1", - "select last_row(c1) as 'd1' from testdb.stb1", - #"select last_row(c1 ,NULL) from testdb.stb1", - "select last_row(,) from testdb.stb1;", - "select last_row(abs(c1) ab from testdb.stb1)", - "select last_row(c1) as int from testdb.stb1" + f"select last_row from {dbname}.t1", + f"select last_row(-+--+c1) from {dbname}.t1", + f"select last_row(123--123)==1 from {dbname}.t1", + f"select last_row(c1) as 'd1' from {dbname}.t1", + #f"select last_row(c1 ,NULL) from {dbname}.t1", + f"select last_row(,) from {dbname}.t1;", + f"select last_row(abs(c1) ab from {dbname}.t1)", + f"select last_row(c1) as int from {dbname}.t1", + f"select last_row from {dbname}.stb1", + f"select last_row(123--123)==1 from {dbname}.stb1", + f"select last_row(c1) as 'd1' from {dbname}.stb1", + #f"select last_row(c1 ,NULL) from {dbname}.stb1", + f"select last_row(,) from {dbname}.stb1;", + f"select last_row(abs(c1) ab from {dbname}.stb1)", + f"select last_row(c1) as int from {dbname}.stb1" ] for error_sql in error_sql_lists: tdSql.error(error_sql) - def support_types(self): - tdSql.execute("use testdb") + def support_types(self, dbname="testdb"): + tdSql.execute(f"use {dbname}") tbnames = ["stb1", "t1", "ct1", "ct2"] for tbname in tbnames: - tdSql.query("desc {}".format(tbname)) + tdSql.query(f"desc {dbname}.{tbname}") coltypes = tdSql.queryResult for coltype in coltypes: colname = coltype[0] col_note = coltype[-1] if col_note != "TAG": - abs_sql = "select last_row({}) from {}".format(colname, "testdb."+tbname) + abs_sql = f"select last_row({colname}) from {dbname}.{tbname}" tdSql.query(abs_sql) - def basic_abs_function(self): + def basic_abs_function(self, dbname="testdb"): # basic query - tdSql.query("select c1 from testdb.ct3") + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from testdb.t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from testdb.stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select last_row(c1) from testdb.ct3") + tdSql.query(f"select last_row(c1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select last_row(c2) from testdb.ct3") + tdSql.query(f"select last_row(c2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select last_row(c3) from testdb.ct3") + tdSql.query(f"select last_row(c3) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select last_row(c4) from testdb.ct3") + tdSql.query(f"select last_row(c4) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select last_row(c5) from testdb.ct3") + tdSql.query(f"select last_row(c5) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select last_row(c6) from testdb.ct3") + tdSql.query(f"select last_row(c6) from {dbname}.ct3") # used for regular table # bug need fix - tdSql.query("select last_row(c1) from testdb.t1") + tdSql.query(f"select last_row(c1) from {dbname}.t1") tdSql.checkData(0, 0, None) - tdSql.query("select last_row(c1) from testdb.ct4") + tdSql.query(f"select last_row(c1) from {dbname}.ct4") tdSql.checkData(0, 0, None) - tdSql.query("select last_row(c1) from testdb.stb1") + tdSql.query(f"select last_row(c1) from {dbname}.stb1") tdSql.checkData(0, 0, None) - - # support regular query about last ,first ,last_row - tdSql.error("select last_row(c1,NULL) from testdb.t1") - tdSql.error("select last_row(NULL) from testdb.t1") - tdSql.error("select last(NULL) from testdb.t1") - tdSql.error("select first(NULL) from testdb.t1") - - tdSql.query("select last_row(c1,123) from testdb.t1") + + # support regular query about last ,first ,last_row + tdSql.error(f"select last_row(c1,NULL) from {dbname}.t1") + tdSql.error(f"select last_row(NULL) from {dbname}.t1") + tdSql.error(f"select last(NULL) from {dbname}.t1") + tdSql.error(f"select first(NULL) from {dbname}.t1") + + tdSql.query(f"select last_row(c1,123) from {dbname}.t1") tdSql.checkData(0,0,None) tdSql.checkData(0,1,123) - tdSql.query("select last_row(123) from testdb.t1") + tdSql.query(f"select last_row(123) from {dbname}.t1") tdSql.checkData(0,0,123) - tdSql.error("select last(c1,NULL) from testdb.t1") + tdSql.error(f"select last(c1,NULL) from {dbname}.t1") - tdSql.query("select last(c1,123) from testdb.t1") + tdSql.query(f"select last(c1,123) from {dbname}.t1") tdSql.checkData(0,0,9) tdSql.checkData(0,1,123) - tdSql.error("select first(c1,NULL) from testdb.t1") + tdSql.error(f"select first(c1,NULL) from {dbname}.t1") - tdSql.query("select first(c1,123) from testdb.t1") + tdSql.query(f"select first(c1,123) from {dbname}.t1") tdSql.checkData(0,0,1) tdSql.checkData(0,1,123) - tdSql.error("select last_row(c1,c2,c3,NULL,c4) from testdb.t1") + tdSql.error(f"select last_row(c1,c2,c3,NULL,c4) from {dbname}.t1") - tdSql.query("select last_row(c1,c2,c3,123,c4) from testdb.t1") + tdSql.query(f"select last_row(c1,c2,c3,123,c4) from {dbname}.t1") tdSql.checkData(0,0,None) tdSql.checkData(0,1,None) tdSql.checkData(0,2,None) tdSql.checkData(0,3,123) tdSql.checkData(0,4,None) - - tdSql.error("select last_row(c1,c2,c3,NULL,c4,t1,t2) from testdb.ct1") - tdSql.query("select last_row(c1,c2,c3,123,c4,t1,t2) from testdb.ct1") + tdSql.error(f"select last_row(c1,c2,c3,NULL,c4,t1,t2) from {dbname}.ct1") + + tdSql.query(f"select last_row(c1,c2,c3,123,c4,t1,t2) from {dbname}.ct1") tdSql.checkData(0,0,9) tdSql.checkData(0,1,-99999) tdSql.checkData(0,2,-999) @@ -338,13 +332,13 @@ class TDTestCase: tdSql.checkData(0,5,0) # # bug need fix - tdSql.query("select last_row(c1), c2, c3 , c4, c5 from testdb.t1") + tdSql.query(f"select last_row(c1), c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) # # bug need fix - tdSql.query("select last_row(c1), c2, c3 , c4, c5 from testdb.ct1") + tdSql.query(f"select last_row(c1), c2, c3 , c4, c5 from {dbname}.ct1") tdSql.checkData(0, 0, 9) tdSql.checkData(0, 1, -99999) tdSql.checkData(0, 2, -999) @@ -352,7 +346,7 @@ class TDTestCase: tdSql.checkData(0, 4,-9.99000) # bug need fix - tdSql.query("select last_row(c1), c2, c3 , c4, c5 from testdb.stb1 where tbname='ct1'") + tdSql.query(f"select last_row(c1), c2, c3 , c4, c5 from {dbname}.stb1 where tbname='ct1'") tdSql.checkData(0, 0, 9) tdSql.checkData(0, 1, -99999) tdSql.checkData(0, 2, -999) @@ -360,124 +354,124 @@ class TDTestCase: tdSql.checkData(0, 4,-9.99000) # bug fix - tdSql.query("select last_row(abs(c1)) from testdb.ct1") + tdSql.query(f"select last_row(abs(c1)) from {dbname}.ct1") tdSql.checkData(0,0,9) # # bug fix - tdSql.query("select last_row(c1+1) from testdb.ct1") - tdSql.query("select last_row(c1+1) from testdb.stb1") - tdSql.query("select last_row(c1+1) from testdb.t1") + tdSql.query(f"select last_row(c1+1) from {dbname}.ct1") + tdSql.query(f"select last_row(c1+1) from {dbname}.stb1") + tdSql.query(f"select last_row(c1+1) from {dbname}.t1") # used for stable table - tdSql.query("select last_row(c1 ,c2 ,c3) ,last_row(c4) from testdb.ct1") + tdSql.query(f"select last_row(c1 ,c2 ,c3) ,last_row(c4) from {dbname}.ct1") tdSql.checkData(0,0,9) tdSql.checkData(0,1,-99999) tdSql.checkData(0,2,-999) tdSql.checkData(0,3,None) # bug need fix - tdSql.query("select last_row(c1 ,c2 ,c3) from testdb.stb1 ") + tdSql.query(f"select last_row(c1 ,c2 ,c3) from {dbname}.stb1 ") tdSql.checkData(0,0,None) tdSql.checkData(0,1,None) tdSql.checkData(0,2,None) - tdSql.query('select last_row(c1) from testdb.t1 where ts <"2022-12-31 01:01:36.000"') + tdSql.query(f'select last_row(c1) from {dbname}.t1 where ts <"2022-12-31 01:01:36.000"') tdSql.checkData(0,0,8) # bug need fix - tdSql.query("select abs(last_row(c1)-2)+max(c1),ceil(last_row(c4)-2) from testdb.stb1 where c4 is not null") + tdSql.query(f"select abs(last_row(c1)-2)+max(c1),ceil(last_row(c4)-2) from {dbname}.stb1 where c4 is not null") tdSql.checkData(0,0,16.000000000) tdSql.checkData(0,1,-101.000000000) - tdSql.query("select abs(last_row(c1)-2)+max(c1),ceil(last_row(c4)-2) from testdb.ct1 where c4<0") + tdSql.query(f"select abs(last_row(c1)-2)+max(c1),ceil(last_row(c4)-2) from {dbname}.ct1 where c4<0") tdSql.checkData(0,0,16.000000000) tdSql.checkData(0,1,-101.000000000) - tdSql.query("select last_row(ceil(c1+2)+floor(c1)-10) from testdb.stb1") + tdSql.query(f"select last_row(ceil(c1+2)+floor(c1)-10) from {dbname}.stb1") tdSql.checkData(0,0,None) - tdSql.query("select last_row(ceil(c1+2)+floor(c1)-10) from testdb.ct1") + tdSql.query(f"select last_row(ceil(c1+2)+floor(c1)-10) from {dbname}.ct1") tdSql.checkData(0,0,10.000000000) # filter for last_row # bug need fix for all function - tdSql.query("select last_row(ts ,c1 ) from testdb.ct4 where t1 = 1 ") + tdSql.query(f"select last_row(ts ,c1 ) from {dbname}.ct4 where t1 = 1 ") tdSql.checkRows(0) - tdSql.query("select count(c1) from testdb.ct4 where t1 = 1 ") + tdSql.query(f"select count(c1) from {dbname}.ct4 where t1 = 1 ") tdSql.checkRows(0) - tdSql.query("select last_row(c1) ,last(c1) from testdb.stb1 where c1 is null") + tdSql.query(f"select last_row(c1) ,last(c1) from {dbname}.stb1 where c1 is null") tdSql.checkRows(1) tdSql.checkData(0,0,None) tdSql.checkData(0,1,None) - tdSql.query("select last_row(c1) ,count(*) from testdb.stb1 where c1 is null") + tdSql.query(f"select last_row(c1) ,count(*) from {dbname}.stb1 where c1 is null") tdSql.checkData(0,0,None) tdSql.checkData(0,1,3) - tdSql.query("select last_row(c1) ,count(c1) from testdb.stb1 where c1 is null") + tdSql.query(f"select last_row(c1) ,count(c1) from {dbname}.stb1 where c1 is null") tdSql.checkData(0,0,None) tdSql.checkData(0,1,0) # bug need fix - tdSql.query("select tbname ,last_row(c1) from testdb.stb1") + tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1") tdSql.checkData(0,0,'ct4') tdSql.checkData(0,1,None) - tdSql.query(" select tbname ,last_row(c1) from testdb.stb1 partition by tbname order by tbname ") + tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 partition by tbname order by tbname ") tdSql.checkData(0,0,'ct1') tdSql.checkData(0,1,9) tdSql.checkData(1,0,'ct4') tdSql.checkData(1,1,None) - tdSql.query(" select tbname ,last_row(c1) from testdb.stb1 group by tbname order by tbname ") + tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 group by tbname order by tbname ") tdSql.checkData(0,0,'ct1') tdSql.checkData(0,1,9) tdSql.checkData(1,0,'ct4') tdSql.checkData(1,1,None) - tdSql.query(" select t1 ,count(c1) from testdb.stb1 partition by t1 ") + tdSql.query(f"select t1 ,count(c1) from {dbname}.stb1 partition by t1 ") tdSql.checkRows(2) # filter by tbname - tdSql.query("select last_row(c1) from testdb.stb1 where tbname = 'ct1' ") + tdSql.query(f"select last_row(c1) from {dbname}.stb1 where tbname = 'ct1' ") tdSql.checkData(0,0,9) # bug need fix - tdSql.query("select tbname ,last_row(c1) from testdb.stb1 where tbname = 'ct1' ") + tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 where tbname = 'ct1' ") tdSql.checkData(0,1,9) - tdSql.query("select tbname ,last_row(c1) from testdb.stb1 partition by tbname order by tbname") + tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 partition by tbname order by tbname") tdSql.checkData(0, 0, 'ct1') tdSql.checkData(0, 1, 9) tdSql.checkData(1, 0, 'ct4') tdSql.checkData(1, 1, None) - tdSql.query("select tbname ,last_row(c1) from testdb.stb1 group by tbname order by tbname") + tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 group by tbname order by tbname") tdSql.checkData(0, 0, 'ct1') tdSql.checkData(0, 1, 9) tdSql.checkData(1, 0, 'ct4') tdSql.checkData(1, 1, None) # last_row for only tag - tdSql.query("select last_row(t1 ,t2 ,t3 , t4 ) from testdb.stb1") + tdSql.query(f"select last_row(t1 ,t2 ,t3 , t4 ) from {dbname}.stb1") tdSql.checkData(0,0,3) tdSql.checkData(0,1,33333) tdSql.checkData(0,2,333) tdSql.checkData(0,3,3) - tdSql.query("select last_row(abs(floor(t1)) ,t2 ,ceil(abs(t3)) , abs(ceil(t4)) ) from testdb.stb1") + tdSql.query(f"select last_row(abs(floor(t1)) ,t2 ,ceil(abs(t3)) , abs(ceil(t4)) ) from {dbname}.stb1") tdSql.checkData(0,0,3) tdSql.checkData(0,1,33333) tdSql.checkData(0,2,333) tdSql.checkData(0,3,3) # filter by tag - tdSql.query("select tbname ,last_row(c1) from testdb.stb1 where t1 =0 ") + tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 where t1 =0 ") tdSql.checkData(0,1,9) - tdSql.query("select tbname ,last_row(c1) ,t1 from testdb.stb1 partition by t1 order by t1") + tdSql.query(f"select tbname ,last_row(c1) ,t1 from {dbname}.stb1 partition by t1 order by t1") tdSql.checkData(0, 0, 'ct1') tdSql.checkData(0, 1, 9) tdSql.checkData(1, 0, 'ct4') @@ -485,56 +479,55 @@ class TDTestCase: # filter by col - tdSql.query("select tbname ,last_row(c1),abs(c1)from testdb.stb1 where c1 =1;") + tdSql.query(f"select tbname ,last_row(c1),abs(c1)from {dbname}.stb1 where c1 =1;") tdSql.checkData(0, 0, 'ct1') tdSql.checkData(0, 1, 1) tdSql.checkData(0, 2, 1) - tdSql.query("select last_row(c1) from testdb.stb1 where abs(ceil(c1))*c1==1") + tdSql.query(f"select last_row(c1) from {dbname}.stb1 where abs(ceil(c1))*c1==1") tdSql.checkData(0,0,1) # mix with common functions - tdSql.query("select last_row(*) ,last(*) from testdb.stb1 ") + tdSql.query(f"select last_row(*) ,last(*) from {dbname}.stb1 ") tdSql.checkRows(1) - tdSql.query("select last_row(*) ,last(*) from testdb.stb1 ") + tdSql.query(f"select last_row(*) ,last(*) from {dbname}.stb1 ") tdSql.checkRows(1) - tdSql.query("select last_row(c1+abs(c1)) from testdb.stb1 partition by tbname order by tbname") - tdSql.query("select last(c1), max(c1+abs(c1)),last_row(c1+abs(c1)) from testdb.stb1 partition by tbname order by tbname") + tdSql.query(f"select last_row(c1+abs(c1)) from {dbname}.stb1 partition by tbname order by tbname") + tdSql.query(f"select last(c1), max(c1+abs(c1)),last_row(c1+abs(c1)) from {dbname}.stb1 partition by tbname order by tbname") # # bug need fix ,taosd crash - tdSql.error("select last_row(*) ,last(*) from testdb.stb1 partition by tbname order by last(*)") - tdSql.error("select last_row(*) ,last(*) from testdb.stb1 partition by tbname order by last_row(*)") + tdSql.error(f"select last_row(*) ,last(*) from {dbname}.stb1 partition by tbname order by last(*)") + tdSql.error(f"select last_row(*) ,last(*) from {dbname}.stb1 partition by tbname order by last_row(*)") # mix with agg functions - tdSql.query("select last(*), last_row(*),last(c1), last_row(c1) from testdb.stb1 ") - tdSql.query("select last(*), last_row(*),last(c1), last_row(c1) from testdb.ct1 ") - tdSql.query("select last(*), last_row(*),last(c1+1)*max(c1), last_row(c1+2)/2 from testdb.t1 ") - tdSql.query("select last_row(*) ,abs(c1/2)+100 from testdb.stb1 where tbname =\"ct1\" ") - tdSql.query("select c1, last_row(c5) from testdb.ct1 ") - tdSql.error("select c1, last_row(c5) ,last(c1) from testdb.stb1 ") + tdSql.query(f"select last(*), last_row(*),last(c1), last_row(c1) from {dbname}.stb1 ") + tdSql.query(f"select last(*), last_row(*),last(c1), last_row(c1) from {dbname}.ct1 ") + tdSql.query(f"select last(*), last_row(*),last(c1+1)*max(c1), last_row(c1+2)/2 from {dbname}.t1 ") + tdSql.query(f"select last_row(*) ,abs(c1/2)+100 from {dbname}.stb1 where tbname =\"ct1\" ") + tdSql.query(f"select c1, last_row(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, last_row(c5) ,last(c1) from {dbname}.stb1 ") # agg functions mix with agg functions - tdSql.query("select last(c1) , max(c5), count(c5) from testdb.stb1") - tdSql.query("select last_row(c1) , max(c5), count(c5) from testdb.ct1") + tdSql.query(f"select last(c1) , max(c5), count(c5) from {dbname}.stb1") + tdSql.query(f"select last_row(c1) , max(c5), count(c5) from {dbname}.ct1") # bug fix for compute - tdSql.query("select last_row(c1) -0 ,last(c1)-0 ,last(c1)+last_row(c1) from testdb.ct4 ") + tdSql.query(f"select last_row(c1) -0 ,last(c1)-0 ,last(c1)+last_row(c1) from {dbname}.ct4 ") tdSql.checkData(0,0,None) tdSql.checkData(0,1,0.000000000) tdSql.checkData(0,2,None) - tdSql.query(" select c1, abs(c1) -0 ,last_row(c1-0.1)-0.1 from testdb.ct1") + tdSql.query(f"select c1, abs(c1) -0 ,last_row(c1-0.1)-0.1 from {dbname}.ct1") tdSql.checkData(0,0,9) tdSql.checkData(0,1,9.000000000) tdSql.checkData(0,2,8.800000000) - def abs_func_filter(self): - tdSql.execute("use db") + def abs_func_filter(self, dbname="db"): tdSql.query( - "select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,last_row(log(c1,2)-0.5) from db.ct4 where c1>5 ") + f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,last_row(log(c1,2)-0.5) from {dbname}.ct4 where c1>5 ") tdSql.checkData(0, 0, 6) tdSql.checkData(0, 1, 6.000000000) tdSql.checkData(0, 2, 6.000000000) @@ -542,19 +535,19 @@ class TDTestCase: tdSql.checkData(0, 4, 2.084962501) tdSql.query( - "select last_row(c1,c2,c1+5) from db.ct4 where c1=5 ") + f"select last_row(c1,c2,c1+5) from {dbname}.ct4 where c1=5 ") tdSql.checkData(0, 0, 5) tdSql.checkData(0, 1, 55555) tdSql.checkData(0, 2, 10.000000000) tdSql.query( - "select last(c1,c2,c1+5) from db.ct4 where c1=5 ") + f"select last(c1,c2,c1+5) from {dbname}.ct4 where c1=5 ") tdSql.checkData(0, 0, 5) tdSql.checkData(0, 1, 55555) tdSql.checkData(0, 2, 10.000000000) tdSql.query( - "select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from db.ct4 where c1>log(c1,2) limit 1 ") + f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>log(c1,2) limit 1 ") tdSql.checkRows(1) tdSql.checkData(0, 0, 8) tdSql.checkData(0, 1, 88888) @@ -566,166 +559,162 @@ class TDTestCase: def abs_Arithmetic(self): pass - def check_boundary_values(self): + def check_boundary_values(self, dbname="bound_test"): - tdSql.execute("drop database if exists bound_test") - tdSql.execute("create database if not exists bound_test cachemodel 'LAST_ROW' ") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname} cachemodel 'LAST_ROW' ") time.sleep(3) - tdSql.execute("use bound_test") + tdSql.execute(f"use {dbname}") tdSql.execute( - "create table bound_test.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" ) - tdSql.execute(f'create table bound_test.sub1_bound using stb_bound tags ( 1 )') + tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( - f"insert into bound_test.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into bound_test.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into bound_test.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into bound_test.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.error( - f"insert into bound_test.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) # check basic elem for table per row tdSql.query( - "select last(c1) ,last_row(c2), last_row(c3)+1 , last(c4)+1 from bound_test.sub1_bound ") + f"select last(c1) ,last_row(c2), last_row(c3)+1 , last(c4)+1 from {dbname}.sub1_bound ") tdSql.checkData(0, 0, -2147483646) tdSql.checkData(0, 1, -9223372036854775806) tdSql.checkData(0, 2, -32765.000000000) tdSql.checkData(0, 3, -125.000000000) # check + - * / in functions tdSql.query( - "select last_row(c1+1) ,last_row(c2) , last(c3*1) , last(c4/2) from bound_test.sub1_bound ") - - def test_tag_compute_for_scalar_function(self): - - tdSql.execute("use testdb") + f"select last_row(c1+1) ,last_row(c2) , last(c3*1) , last(c4/2) from {dbname}.sub1_bound ") + def test_tag_compute_for_scalar_function(self, dbname="testdb"): # bug need fix - tdSql.query(" select sum(c1) from testdb.stb1 where t1+10 >1; ") - tdSql.query("select c1 ,t1 from testdb.stb1 where t1 =0 ") + tdSql.query(f"select sum(c1) from {dbname}.stb1 where t1+10 >1; ") + tdSql.query(f"select c1 ,t1 from {dbname}.stb1 where t1 =0 ") tdSql.checkRows(13) - tdSql.query("select last_row(c1,t1) from testdb.stb1 ") + tdSql.query(f"select last_row(c1,t1) from {dbname}.stb1 ") tdSql.checkData(0,0,None) tdSql.checkData(0,1,3) - tdSql.query("select last_row(c1),t1 from testdb.stb1 ") + tdSql.query(f"select last_row(c1),t1 from {dbname}.stb1 ") tdSql.checkData(0,0,None) tdSql.checkData(0,1,3) - tdSql.query("select last_row(c1,t1),last(t1) from testdb.stb1 ") + tdSql.query(f"select last_row(c1,t1),last(t1) from {dbname}.stb1 ") tdSql.checkData(0,0,None) tdSql.checkData(0,1,3) tdSql.checkData(0,2,3) - tdSql.query("select last_row(t1) from testdb.stb1 where t1 >0 ") + tdSql.query(f"select last_row(t1) from {dbname}.stb1 where t1 >0 ") tdSql.checkRows(1) tdSql.checkData(0,0,3) - tdSql.query("select last_row(t1) from testdb.stb1 where t1 =3 ") + tdSql.query(f"select last_row(t1) from {dbname}.stb1 where t1 =3 ") tdSql.checkRows(1) tdSql.checkData(0,0,3) - tdSql.query("select last_row(t1) from testdb.stb1 where t1 =2") + tdSql.query(f"select last_row(t1) from {dbname}.stb1 where t1 =2") tdSql.checkRows(0) # nest query for last_row - tdSql.query("select last_row(t1) from (select ts , c1 ,t1 from testdb.stb1)") + tdSql.query(f"select last_row(t1) from (select ts , c1 ,t1 from {dbname}.stb1)") tdSql.checkData(0,0,3) - tdSql.query("select distinct(c1) ,t1 from testdb.stb1") + tdSql.query(f"select distinct(c1) ,t1 from {dbname}.stb1") tdSql.checkRows(20) - tdSql.query("select last_row(c1) from (select _rowts , c1 ,t1 from testdb.stb1)") + tdSql.query(f"select last_row(c1) from (select _rowts , c1 ,t1 from {dbname}.stb1)") tdSql.checkData(0,0,None) - tdSql.query("select last_row(c1) from (select ts , c1 ,t1 from testdb.stb1)") + tdSql.query(f"select last_row(c1) from (select ts , c1 ,t1 from {dbname}.stb1)") tdSql.checkData(0,0,None) - tdSql.query("select ts , last_row(c1) ,c1 from (select ts , c1 ,t1 from testdb.stb1)") + tdSql.query(f"select ts , last_row(c1) ,c1 from (select ts , c1 ,t1 from {dbname}.stb1)") tdSql.checkData(0,1,None) - tdSql.query("select ts , last_row(c1) ,c1 from (select ts , max(c1) c1 ,t1 from testdb.stb1 where ts >now -1h and ts now -1h and ts now -1h and ts now -1h and ts now -1h and ts now -1h and ts ="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" interval(50d) sliding(30d) fill(NULL)') + tdSql.query(f'select max(c1) from {dbname}.t1 where ts>="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" interval(50d) sliding(30d) fill(NULL)') tdSql.checkRows(8) tdSql.checkData(7,0,None) - tdSql.query('select last_row(c1) from testdb.t1 where ts>="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" interval(50d) sliding(30d) fill(value ,2 )') + tdSql.query(f'select last_row(c1) from {dbname}.t1 where ts>="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" interval(50d) sliding(30d) fill(value ,2 )') tdSql.checkRows(8) tdSql.checkData(7,0,2) - tdSql.query('select last_row(c1) from testdb.stb1 where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s)') - tdSql.query('select last_row(c1) from (select ts , c1 from testdb.t1 where ts>="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" ) interval(10s) sliding(5s)') + tdSql.query(f'select last_row(c1) from {dbname}.stb1 where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s)') + tdSql.query(f'select last_row(c1) from (select ts , c1 from {dbname}.t1 where ts>="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" ) interval(10s) sliding(5s)') # join - tdSql.query("use test") - tdSql.query("select last(sub_tb_1.c1), last(sub_tb_2.c2) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts") + db1 = "test" + tdSql.query(f"use {db1}") + tdSql.query(f"select last(sub_tb_1.c1), last(sub_tb_2.c2) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts") tdSql.checkCols(2) last_row_result = tdSql.queryResult - tdSql.query("select last_row(sub_tb_1.c1), last_row(sub_tb_2.c2) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts") + tdSql.query(f"select last_row(sub_tb_1.c1), last_row(sub_tb_2.c2) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts") for ind , row in enumerate(last_row_result): tdSql.checkData(ind , 0 , row[0]) - tdSql.query("select last(*), last(*) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts") + tdSql.query(f"select last(*), last(*) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts") last_row_result = tdSql.queryResult - tdSql.query("select last_row(*), last_row(*) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts") + tdSql.query(f"select last_row(*), last_row(*) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts") for ind , row in enumerate(last_row_result): tdSql.checkData(ind , 0 , row[0]) - tdSql.query("select last(*), last_row(*) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts") + tdSql.query(f"select last(*), last_row(*) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts") for ind , row in enumerate(last_row_result): tdSql.checkData(ind , 0 , row[0]) - tdSql.query("select last_row(*), last(*) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts") + tdSql.query(f"select last_row(*), last(*) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts") for ind , row in enumerate(last_row_result): tdSql.checkData(ind , 0 , row[0]) - def support_super_table_test(self): - tdSql.execute(" use testdb ") - self.check_result_auto( " select c1 from testdb.stb1 order by ts " , "select abs(c1) from testdb.stb1 order by ts" ) - self.check_result_auto( " select c1 from testdb.stb1 order by tbname " , "select abs(c1) from testdb.stb1 order by tbname" ) - self.check_result_auto( " select c1 from testdb.stb1 where c1 > 0 order by tbname " , "select abs(c1) from testdb.stb1 where c1 > 0 order by tbname" ) - self.check_result_auto( " select c1 from testdb.stb1 where c1 > 0 order by tbname " , "select abs(c1) from testdb.stb1 where c1 > 0 order by tbname" ) + def support_super_table_test(self, dbname="testdb"): + self.check_result_auto( f"select c1 from {dbname}.stb1 order by ts " , f"select abs(c1) from {dbname}.stb1 order by ts" ) + self.check_result_auto( f"select c1 from {dbname}.stb1 order by tbname " , f"select abs(c1) from {dbname}.stb1 order by tbname" ) + self.check_result_auto( f"select c1 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select abs(c1) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto( f"select c1 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select abs(c1) from {dbname}.stb1 where c1 > 0 order by tbname" ) - self.check_result_auto( " select t1,c1 from testdb.stb1 order by ts " , "select t1, abs(c1) from testdb.stb1 order by ts" ) - self.check_result_auto( " select t2,c1 from testdb.stb1 order by tbname " , "select t2 ,abs(c1) from testdb.stb1 order by tbname" ) - self.check_result_auto( " select t3,c1 from testdb.stb1 where c1 > 0 order by tbname " , "select t3 ,abs(c1) from testdb.stb1 where c1 > 0 order by tbname" ) - self.check_result_auto( " select t4,c1 from testdb.stb1 where c1 > 0 order by tbname " , "select t4 , abs(c1) from testdb.stb1 where c1 > 0 order by tbname" ) - pass + self.check_result_auto( f"select t1,c1 from {dbname}.stb1 order by ts " , f"select t1, abs(c1) from {dbname}.stb1 order by ts" ) + self.check_result_auto( f"select t2,c1 from {dbname}.stb1 order by tbname " , f"select t2 ,abs(c1) from {dbname}.stb1 order by tbname" ) + self.check_result_auto( f"select t3,c1 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select t3 ,abs(c1) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto( f"select t4,c1 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select t4 , abs(c1) from {dbname}.stb1 where c1 > 0 order by tbname" ) def basic_query(self): diff --git a/tests/system-test/2-query/leastsquares.py b/tests/system-test/2-query/leastsquares.py index 3bab509942a54425f09e2ca25e8f6d9434852f97..fe7188a54547d9c63ae962b3330c6316d5672bba 100644 --- a/tests/system-test/2-query/leastsquares.py +++ b/tests/system-test/2-query/leastsquares.py @@ -26,6 +26,7 @@ TS_TYPE_COL = [ TS_COL, ] ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ] +DBNAME = "db" class TDTestCase: def init(self, conn, logSql): @@ -133,13 +134,13 @@ class TDTestCase: return f"select leastsquares({select_clause}, {start_val}, {step_val}) from {from_clause} {where_condition} {group_condition}" @property - def __tb_list(self): + def __tb_list(self, dbname=DBNAME): return [ - "ct1", - "ct4", - "t1", - "ct2", - "stb1", + f"{dbname}.ct1", + f"{dbname}.ct4", + f"{dbname}.nt1", + f"{dbname}.ct2", + f"{dbname}.stb1", ] @property @@ -161,36 +162,37 @@ class TDTestCase: err_sqls = [] __no_join_tblist = self.__tb_list for tb in __no_join_tblist: - select_claus_list = self.__query_condition(tb) - for select_claus in select_claus_list: - group_claus = self.__group_condition(col=select_claus) - where_claus = self.__where_condition(query_conditon=select_claus) - having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null") - for arg in self.start_step_val: - if not isinstance(arg,int) or isinstance(arg, bool) : - err_sqls.extend( - ( - self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg), - self.__single_sql(select_clause=select_claus, from_clause=tb, step_val=arg, group_condition=group_claus), - self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, where_condition=where_claus, group_condition=having_claus), - ) + tbname = tb.split(".")[-1] + select_claus_list = self.__query_condition(tbname) + for select_claus in select_claus_list: + group_claus = self.__group_condition(col=select_claus) + where_claus = self.__where_condition(query_conditon=select_claus) + having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null") + for arg in self.start_step_val: + if not isinstance(arg,int) or isinstance(arg, bool) : + err_sqls.extend( + ( + self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg), + self.__single_sql(select_clause=select_claus, from_clause=tb, step_val=arg, group_condition=group_claus), + self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, where_condition=where_claus, group_condition=having_claus), ) - elif isinstance(select_claus, str) and any([BOOL_COL in select_claus, BINARY_COL in select_claus, NCHAR_COL in select_claus, TS_COL in select_claus]): - err_sqls.extend( - ( - self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg), - self.__single_sql(select_clause=select_claus, from_clause=tb, step_val=arg, group_condition=group_claus), - self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, where_condition=where_claus, group_condition=having_claus), - ) + ) + elif isinstance(select_claus, str) and any([BOOL_COL in select_claus, BINARY_COL in select_claus, NCHAR_COL in select_claus, TS_COL in select_claus]): + err_sqls.extend( + ( + self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg), + self.__single_sql(select_clause=select_claus, from_clause=tb, step_val=arg, group_condition=group_claus), + self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, where_condition=where_claus, group_condition=having_claus), ) - else: - current_sqls.extend( - ( - self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, step_val=0), - self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=0, step_val=arg, group_condition=group_claus), - self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, step_val=arg, where_condition=where_claus, group_condition=having_claus), - ) + ) + else: + current_sqls.extend( + ( + self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, step_val=0), + self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=0, step_val=arg, group_condition=group_claus), + self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, step_val=arg, where_condition=where_claus, group_condition=having_claus), ) + ) # return filter(None, sqls) return list(filter(None, current_sqls)), list(filter(None, err_sqls)) @@ -207,25 +209,25 @@ class TDTestCase: def __test_current(self): - # tdSql.query("explain select c1 from ct1") - # tdSql.query("explain select 1 from ct2") - # tdSql.query("explain select cast(ceil(c6) as bigint) from ct4 group by c6") - # tdSql.query("explain select count(c3) from ct4 group by c7 having count(c3) > 0") - # tdSql.query("explain select ct2.c3 from ct4 join ct2 on ct4.ts=ct2.ts") + # tdSql.query("explain select c1 from {dbname}.ct1") + # tdSql.query("explain select 1 from {dbname}.ct2") + # tdSql.query("explain select cast(ceil(c6) as bigint) from {dbname}.ct4 group by c6") + # tdSql.query("explain select count(c3) from {dbname}.ct4 group by c7 having count(c3) > 0") + # tdSql.query("explain select ct2.c3 from {dbname}.ct4 join ct2 on ct4.ts=ct2.ts") # tdSql.query("explain select c1 from stb1 where c1 is not null and c1 in (0, 1, 2) or c1 between 2 and 100 ") self.leastsquares_check() - def __test_error(self): + def __test_error(self, dbname=DBNAME): tdLog.printNoPrefix("===step 0: err case, must return err") - tdSql.error( "select leastsquares(c1) from ct8" ) - tdSql.error( "select leastsquares(c1, 1) from ct1 " ) - tdSql.error( "select leastsquares(c1, null, 1) from ct1 " ) - tdSql.error( "select leastsquares(c1, 1, null) from ct1 " ) - tdSql.error( "select leastsquares(null, 1, 1) from ct1 " ) - tdSql.error( '''select leastsquares(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']) - from ct1 + tdSql.error( f"select leastsquares(c1) from {dbname}.ct8" ) + tdSql.error( f"select leastsquares(c1, 1) from {dbname}.ct1 " ) + tdSql.error( f"select leastsquares(c1, null, 1) from {dbname}.ct1 " ) + tdSql.error( f"select leastsquares(c1, 1, null) from {dbname}.ct1 " ) + tdSql.error( f"select leastsquares(null, 1, 1) from {dbname}.ct1 " ) + tdSql.error( f'''select leastsquares(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']) + from {dbname}.ct1 where ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null group by ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' ) @@ -234,16 +236,16 @@ class TDTestCase: self.__test_error() self.__test_current() - def __create_tb(self): + def __create_tb(self, dbname=DBNAME): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp ) tags (t1 int) ''' - create_ntb_sql = f'''create table t1( + create_ntb_sql = f'''create table {dbname}.nt1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -253,30 +255,29 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') - { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2} + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') - def __insert_data(self, rows): + def __insert_data(self, rows, dbname=DBNAME): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into ct1 values + f'''insert into {dbname}.ct1 values ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -292,7 +293,7 @@ class TDTestCase: ) tdSql.execute( - f'''insert into ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -308,13 +309,13 @@ class TDTestCase: ) for i in range(rows): - insert_data = f'''insert into t1 values + insert_data = f'''insert into {dbname}.nt1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.nt1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -332,7 +333,7 @@ class TDTestCase: def run(self): - tdSql.prepare() + tdSql.prepare(DBNAME) tdLog.printNoPrefix("==========step1:create table") self.__create_tb() @@ -344,10 +345,9 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) + tdSql.execute(f"flush database {DBNAME}") - tdSql.execute("use db") + tdSql.execute(f"use {DBNAME}") tdLog.printNoPrefix("==========step4:after wal, all check again ") self.all_test() diff --git a/tests/system-test/2-query/length.py b/tests/system-test/2-query/length.py index ed604c41ae351e9f03e51b4a6f77160cc463529c..1761572245a8ca5a1c814e21c566ffb1634e419a 100644 --- a/tests/system-test/2-query/length.py +++ b/tests/system-test/2-query/length.py @@ -19,6 +19,7 @@ TS_COL = "c10" UN_CHAR_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, ] CHAR_COL = [ BINARY_COL, NCHAR_COL, ] TS_TYPE_COL = [TS_COL] +DBNAME = "db" class TDTestCase: @@ -102,16 +103,16 @@ class TDTestCase: return sqls - def __test_current(self): + def __test_current(self, dbname=DBNAME): tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.nt1", f"{dbname}.stb1"] for tb in tbname: self.__length_current_check(tb) tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") - def __test_error(self): + def __test_error(self, dbname=DBNAME): tdLog.printNoPrefix("==========err sql condition check , must return error==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.nt1", f"{dbname}.stb1"] for tb in tbname: for errsql in self.__length_err_check(tb): @@ -124,17 +125,16 @@ class TDTestCase: self.__test_error() - def __create_tb(self): - tdSql.prepare() + def __create_tb(self, dbname=DBNAME): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp ) tags (t1 int) ''' - create_ntb_sql = f'''create table t1( + create_ntb_sql = f'''create table {dbname}.nt1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -144,29 +144,29 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') - def __insert_data(self, rows): + def __insert_data(self, rows, dbname=DBNAME): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into ct1 values + f'''insert into {dbname}.ct1 values ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } ) ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -182,7 +182,7 @@ class TDTestCase: ) tdSql.execute( - f'''insert into ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -198,13 +198,13 @@ class TDTestCase: ) for i in range(rows): - insert_data = f'''insert into t1 values + insert_data = f'''insert into {dbname}.nt1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, "binary_{i}", "nchar_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.nt1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -233,8 +233,7 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) + tdSql.execute("flush database db") tdSql.execute("use db") diff --git a/tests/system-test/2-query/log.py b/tests/system-test/2-query/log.py index b8e0aaf52ee38ed22c82a0e28f5fccf85082b347..358d2b9551a1ee8db9446babd2588ee47404ac67 100644 --- a/tests/system-test/2-query/log.py +++ b/tests/system-test/2-query/log.py @@ -10,48 +10,46 @@ from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - def prepare_datas(self): + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -67,96 +65,8 @@ class TDTestCase: ''' ) + def check_result_auto_log(self ,base , origin_query , log_query): - def check_result_auto_log(self ,origin_query , log_query): - - log_result = tdSql.getResult(log_query) - origin_result = tdSql.getResult(origin_query) - - auto_result =[] - - for row in origin_result: - row_check = [] - for elem in row: - if elem == None: - elem = None - elif elem >0: - elem = math.log(elem) - elif elem <=0: - elem = None - row_check.append(elem) - auto_result.append(row_check) - - check_status = True - for row_index , row in enumerate(log_result): - for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] != elem: - check_status = False - if not check_status: - tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query ) - sys.exit(1) - else: - tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query ) - - def check_result_auto_log2(self ,origin_query , log_query): - - log_result = tdSql.getResult(log_query) - origin_result = tdSql.getResult(origin_query) - - auto_result =[] - - for row in origin_result: - row_check = [] - for elem in row: - if elem == None: - elem = None - elif elem >0: - elem = math.log(elem,2) - elif elem <=0: - elem = None - row_check.append(elem) - auto_result.append(row_check) - - check_status = True - for row_index , row in enumerate(log_result): - for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] != elem: - check_status = False - if not check_status: - tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query ) - sys.exit(1) - else: - tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query ) - - def check_result_auto_log1(self ,origin_query , log_query): - log_result = tdSql.getResult(log_query) - origin_result = tdSql.getResult(origin_query) - - auto_result =[] - - for row in origin_result: - row_check = [] - for elem in row: - if elem == None: - elem = None - elif elem >0: - elem = None - elif elem <=0: - elem = None - row_check.append(elem) - auto_result.append(row_check) - - check_status = True - for row_index , row in enumerate(log_result): - for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] != elem: - check_status = False - if not check_status: - tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query ) - sys.exit(1) - else: - tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query ) - def check_result_auto_log__10(self ,origin_query , log_query): log_result = tdSql.getResult(log_query) origin_result = tdSql.getResult(origin_query) @@ -165,88 +75,92 @@ class TDTestCase: for row in origin_result: row_check = [] for elem in row: - if elem == None: - elem = None - elif elem >0: - elem = None - elif elem <=0: + if base ==1: elem = None + else: + if elem == None: + elem = None + elif elem ==1: + elem = 0.0 + elif elem >0 and elem !=1 : + if base==None : + elem = math.log(elem ) + else: + print(base , elem) + elem = math.log(elem , base) + elif elem <=0: + elem = None + row_check.append(elem) auto_result.append(row_check) - check_status = True + tdSql.query(log_query) for row_index , row in enumerate(log_result): for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] != elem: - check_status = False - if not check_status: - tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query ) - sys.exit(1) - else: - tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query ) - - def test_errors(self): + tdSql.checkData(row_index , col_index ,auto_result[row_index][col_index]) + + def test_errors(self, dbname="db"): error_sql_lists = [ - "select log from t1", - # "select log(-+--+c1 ,2) from t1", - # "select +-log(c1,2) from t1", - # "select ++-log(c1,2) from t1", - # "select ++--log(c1,2) from t1", - # "select - -log(c1,2)*0 from t1", - # "select log(tbname+1,2) from t1 ", - "select log(123--123,2)==1 from t1", - "select log(c1,2) as 'd1' from t1", - "select log(c1 ,c2 ,2) from t1", - "select log(c1 ,NULL ,2) from t1", - "select log(, 2) from t1;", - "select log(log(c1, 2) ab from t1)", - "select log(c1 ,2 ) as int from t1", - "select log from stb1", - # "select log(-+--+c1) from stb1", - # "select +-log(c1) from stb1", - # "select ++-log(c1) from stb1", - # "select ++--log(c1) from stb1", - # "select - -log(c1)*0 from stb1", - # "select log(tbname+1) from stb1 ", - "select log(123--123 ,2)==1 from stb1", - "select log(c1 ,2) as 'd1' from stb1", - "select log(c1 ,c2 ,2 ) from stb1", - "select log(c1 ,NULL,2) from stb1", - "select log(,) from stb1;", - "select log(log(c1 , 2) ab from stb1)", - "select log(c1 , 2) as int from stb1" + f"select log from {dbname}.t1", + # f"select log(-+--+c1 ,2) from {dbname}.t1", + # f"select +-log(c1,2) from {dbname}.t1", + # f"select ++-log(c1,2) from {dbname}.t1", + # f"select ++--log(c1,2) from {dbname}.t1", + # f"select - -log(c1,2)*0 from {dbname}.t1", + # f"select log(tbname+1,2) from {dbname}.t1 ", + f"select log(123--123,2)==1 from {dbname}.t1", + f"select log(c1,2) as 'd1' from {dbname}.t1", + f"select log(c1 ,c2 ,2) from {dbname}.t1", + f"select log(c1 ,NULL ,2) from {dbname}.t1", + f"select log(, 2) from {dbname}.t1;", + f"select log(log(c1, 2) ab from {dbname}.t1)", + f"select log(c1 ,2 ) as int from {dbname}.t1", + f"select log from {dbname}.stb1", + # f"select log(-+--+c1) from {dbname}.stb1", + # f"select +-log(c1) from {dbname}.stb1", + # f"select ++-log(c1) from {dbname}.stb1", + # f"select ++--log(c1) from {dbname}.stb1", + # f"select - -log(c1)*0 from {dbname}.stb1", + # f"select log(tbname+1) from {dbname}.stb1 ", + f"select log(123--123 ,2)==1 from {dbname}.stb1", + f"select log(c1 ,2) as 'd1' from {dbname}.stb1", + f"select log(c1 ,c2 ,2 ) from {dbname}.stb1", + f"select log(c1 ,NULL,2) from {dbname}.stb1", + f"select log(,) from {dbname}.stb1;", + f"select log(log(c1 , 2) ab from {dbname}.stb1)", + f"select log(c1 , 2) as int from {dbname}.stb1" ] for error_sql in error_sql_lists: tdSql.error(error_sql) - def support_types(self): + def support_types(self, dbname="db"): type_error_sql_lists = [ - "select log(ts ,2 ) from t1" , - "select log(c7,c2 ) from t1", - "select log(c8,c1 ) from t1", - "select log(c9,c2 ) from t1", - "select log(ts,c7 ) from ct1" , - "select log(c7,c9 ) from ct1", - "select log(c8,c2 ) from ct1", - "select log(c9,c1 ) from ct1", - "select log(ts,2 ) from ct3" , - "select log(c7,2 ) from ct3", - "select log(c8,2 ) from ct3", - "select log(c9,2 ) from ct3", - "select log(ts,2 ) from ct4" , - "select log(c7,2 ) from ct4", - "select log(c8,2 ) from ct4", - "select log(c9,2 ) from ct4", - "select log(ts,2 ) from stb1" , - "select log(c7,2 ) from stb1", - "select log(c8,2 ) from stb1", - "select log(c9,2 ) from stb1" , - - "select log(ts,2 ) from stbbb1" , - "select log(c7,2 ) from stbbb1", - - "select log(ts,2 ) from tbname", - "select log(c9,2 ) from tbname" + f"select log(ts ,2 ) from {dbname}.t1" , + f"select log(c7,c2 ) from {dbname}.t1", + f"select log(c8,c1 ) from {dbname}.t1", + f"select log(c9,c2 ) from {dbname}.t1", + f"select log(ts,c7 ) from {dbname}.ct1" , + f"select log(c7,c9 ) from {dbname}.ct1", + f"select log(c8,c2 ) from {dbname}.ct1", + f"select log(c9,c1 ) from {dbname}.ct1", + f"select log(ts,2 ) from {dbname}.ct3" , + f"select log(c7,2 ) from {dbname}.ct3", + f"select log(c8,2 ) from {dbname}.ct3", + f"select log(c9,2 ) from {dbname}.ct3", + f"select log(ts,2 ) from {dbname}.ct4" , + f"select log(c7,2 ) from {dbname}.ct4", + f"select log(c8,2 ) from {dbname}.ct4", + f"select log(c9,2 ) from {dbname}.ct4", + f"select log(ts,2 ) from {dbname}.stb1" , + f"select log(c7,2 ) from {dbname}.stb1", + f"select log(c8,2 ) from {dbname}.stb1", + f"select log(c9,2 ) from {dbname}.stb1" , + + f"select log(ts,2 ) from {dbname}.stbbb1" , + f"select log(c7,2 ) from {dbname}.stbbb1", + + f"select log(ts,2 ) from {dbname}.tbname", + f"select log(c9,2 ) from {dbname}.tbname" ] @@ -255,98 +169,88 @@ class TDTestCase: type_sql_lists = [ - "select log(c1,2 ) from t1", - "select log(c2,2 ) from t1", - "select log(c3,2 ) from t1", - "select log(c4,2 ) from t1", - "select log(c5,2 ) from t1", - "select log(c6,2 ) from t1", - - "select log(c1,2 ) from ct1", - "select log(c2,2 ) from ct1", - "select log(c3,2 ) from ct1", - "select log(c4,2 ) from ct1", - "select log(c5,2 ) from ct1", - "select log(c6,2 ) from ct1", - - "select log(c1,2 ) from ct3", - "select log(c2,2 ) from ct3", - "select log(c3,2 ) from ct3", - "select log(c4,2 ) from ct3", - "select log(c5,2 ) from ct3", - "select log(c6,2 ) from ct3", - - "select log(c1,2 ) from stb1", - "select log(c2,2 ) from stb1", - "select log(c3,2 ) from stb1", - "select log(c4,2 ) from stb1", - "select log(c5,2 ) from stb1", - "select log(c6,2 ) from stb1", - - "select log(c6,2) as alisb from stb1", - "select log(c6,2) alisb from stb1", + f"select log(c1,2 ) from {dbname}.t1", + f"select log(c2,2 ) from {dbname}.t1", + f"select log(c3,2 ) from {dbname}.t1", + f"select log(c4,2 ) from {dbname}.t1", + f"select log(c5,2 ) from {dbname}.t1", + f"select log(c6,2 ) from {dbname}.t1", + + f"select log(c1,2 ) from {dbname}.ct1", + f"select log(c2,2 ) from {dbname}.ct1", + f"select log(c3,2 ) from {dbname}.ct1", + f"select log(c4,2 ) from {dbname}.ct1", + f"select log(c5,2 ) from {dbname}.ct1", + f"select log(c6,2 ) from {dbname}.ct1", + + f"select log(c1,2 ) from {dbname}.ct3", + f"select log(c2,2 ) from {dbname}.ct3", + f"select log(c3,2 ) from {dbname}.ct3", + f"select log(c4,2 ) from {dbname}.ct3", + f"select log(c5,2 ) from {dbname}.ct3", + f"select log(c6,2 ) from {dbname}.ct3", + + f"select log(c1,2 ) from {dbname}.stb1", + f"select log(c2,2 ) from {dbname}.stb1", + f"select log(c3,2 ) from {dbname}.stb1", + f"select log(c4,2 ) from {dbname}.stb1", + f"select log(c5,2 ) from {dbname}.stb1", + f"select log(c6,2 ) from {dbname}.stb1", + + f"select log(c6,2) as alisb from {dbname}.stb1", + f"select log(c6,2) alisb from {dbname}.stb1", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - def basic_log_function(self): + def basic_log_function(self, dbname="db"): # basic query - tdSql.query("select c1 from ct3") + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select log(c1 ,2) from ct3") + tdSql.query(f"select log(c1 ,2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select log(c2 ,2) from ct3") + tdSql.query(f"select log(c2 ,2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select log(c3 ,2) from ct3") + tdSql.query(f"select log(c3 ,2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select log(c4 ,2) from ct3") + tdSql.query(f"select log(c4 ,2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select log(c5 ,2) from ct3") + tdSql.query(f"select log(c5 ,2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select log(c6 ,2) from ct3") + tdSql.query(f"select log(c6 ,2) from {dbname}.ct3") tdSql.checkRows(0) # # used for regular table - tdSql.query("select log(c1 ,2) from t1") + tdSql.query(f"select log(c1 ,2) from {dbname}.t1") tdSql.checkData(0, 0, None) tdSql.checkData(1 , 0, 0.000000000) tdSql.checkData(3 , 0, 1.584962501) tdSql.checkData(5 , 0, None) - tdSql.query("select log(c1) from t1") + tdSql.query(f"select log(c1) from {dbname}.t1") tdSql.checkData(0, 0, None) tdSql.checkData(1 , 0, 0.000000000) tdSql.checkData(2 , 0, 0.693147181) tdSql.checkData(3 , 0, 1.098612289) tdSql.checkData(4 , 0, 1.386294361) - tdSql.query("select c1, c2, c3 , c4, c5 from t1") - tdSql.checkData(1, 4, 1.11000) - tdSql.checkData(3, 3, 33) - tdSql.checkData(5, 4, None) - - tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1") - tdSql.checkData(1, 5, 1.11000) - tdSql.checkData(3, 4, 33) - tdSql.checkData(5, 5, None) - - self.check_result_auto_log( "select c1, c2, c3 , c4, c5 from t1", "select log(c1), log(c2) ,log(c3), log(c4), log(c5) from t1") - self.check_result_auto_log2( "select c1, c2, c3 , c4, c5 from t1", "select log(c1 ,2), log(c2 ,2) ,log(c3, 2), log(c4 ,2), log(c5 ,2) from t1") - self.check_result_auto_log1( "select c1, c2, c3 , c4, c5 from t1", "select log(c1 ,1), log(c2 ,1) ,log(c3, 1), log(c4 ,1), log(c5 ,1) from t1") - self.check_result_auto_log__10( "select c1, c2, c3 , c4, c5 from t1", "select log(c1 ,-10), log(c2 ,-10) ,log(c3, -10), log(c4 ,-10), log(c5 ,-10) from t1") + self.check_result_auto_log( None , f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.t1") + self.check_result_auto_log( 2 , f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,2), log(c2 ,2) ,log(c3, 2), log(c4 ,2), log(c5 ,2) from {dbname}.t1") + self.check_result_auto_log( 1, f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,1), log(c2 ,1) ,log(c3, 1), log(c4 ,1), log(c5 ,1) from {dbname}.t1") + self.check_result_auto_log( 10 ,f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,10), log(c2 ,10) ,log(c3, 10), log(c4 ,10), log(c5 ,10) from {dbname}.t1") # used for sub table - tdSql.query("select c1 ,log(c1 ,3) from ct1") + tdSql.query(f"select c1 ,log(c1 ,3) from {dbname}.ct1") tdSql.checkData(0, 1, 1.892789261) tdSql.checkData(1 , 1, 1.771243749) tdSql.checkData(3 , 1, 1.464973521) @@ -354,19 +258,19 @@ class TDTestCase: # test bug fix for log(c1,c2) - tdSql.query("select c1, c2 ,log(c1,c2) from ct1") + tdSql.query(f"select c1, c2 ,log(c1,c2) from {dbname}.ct1") tdSql.checkData(0 , 2, 0.182485070) tdSql.checkData(1 , 2, 0.172791608) tdSql.checkData(2 , 2, 0.161311499) tdSql.checkData(3 , 2, 0.147315235) tdSql.checkData(4 , 2, None) - self.check_result_auto_log( "select c1, c2, c3 , c4, c5 from ct1", "select log(c1), log(c2) ,log(c3), log(c4), log(c5) from ct1") - self.check_result_auto_log2( "select c1, c2, c3 , c4, c5 from ct1", "select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) from ct1") - self.check_result_auto_log__10( "select c1, c2, c3 , c4, c5 from ct1", "select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) from ct1") + self.check_result_auto_log( None ,f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.ct1") + self.check_result_auto_log( 2, f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) from {dbname}.ct1") + self.check_result_auto_log( 10 , f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,10), log(c2,10) ,log(c3,10), log(c4,10), log(c5,10) from {dbname}.ct1") # nest query for log functions - tdSql.query("select c1 , log(c1,3) ,log(log(c1,3),3) , log(log(log(c1,3),3),3) from ct1;") + tdSql.query(f"select c1 , log(c1,3) ,log(log(c1,3),3) , log(log(log(c1,3),3),3) from {dbname}.ct1;") tdSql.checkData(0 , 0 , 8) tdSql.checkData(0 , 1 , 1.892789261) tdSql.checkData(0 , 2 , 0.580779541) @@ -384,36 +288,36 @@ class TDTestCase: # # used for stable table - tdSql.query("select log(c1, 2) from stb1") + tdSql.query(f"select log(c1, 2) from {dbname}.stb1") tdSql.checkRows(25) # used for not exists table - tdSql.error("select log(c1, 2) from stbbb1") - tdSql.error("select log(c1, 2) from tbname") - tdSql.error("select log(c1, 2) from ct5") + tdSql.error(f"select log(c1, 2) from {dbname}.stbbb1") + tdSql.error(f"select log(c1, 2) from {dbname}tbname") + tdSql.error(f"select log(c1, 2) from {dbname}.ct5") # mix with common col - tdSql.query("select c1, log(c1 ,2) from ct1") + tdSql.query(f"select c1, log(c1 ,2) from {dbname}.ct1") tdSql.checkData(0 , 0 ,8) tdSql.checkData(0 , 1 ,3.000000000) tdSql.checkData(4 , 0 ,0) tdSql.checkData(4 , 1 ,None) - tdSql.query("select c1, log(c1,2) from ct4") + tdSql.query(f"select c1, log(c1,2) from {dbname}.ct4") tdSql.checkData(0 , 0 , None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(4 , 0 ,5) tdSql.checkData(4 , 1 ,2.321928095) tdSql.checkData(5 , 0 ,None) tdSql.checkData(5 , 1 ,None) - tdSql.query("select c1, log(c1 ,2 ) from ct4 ") + tdSql.query(f"select c1, log(c1 ,2 ) from {dbname}.ct4 ") tdSql.checkData(0 , 0 ,None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(4 , 0 ,5) tdSql.checkData(4 , 1 ,2.321928095) # mix with common functions - tdSql.query("select c1, log(c1 ,2),c5, log(c5 ,2) from ct4 ") + tdSql.query(f"select c1, log(c1 ,2),c5, log(c5 ,2) from {dbname}.ct4 ") tdSql.checkData(0 , 0 ,None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(0 , 2 ,None) @@ -424,34 +328,34 @@ class TDTestCase: tdSql.checkData(3 , 2 ,6.66000) tdSql.checkData(3 , 3 ,2.735522144) - tdSql.query("select c1, log(c1,1),c5, floor(c5 ) from stb1 ") + tdSql.query(f"select c1, log(c1,1),c5, floor(c5 ) from {dbname}.stb1 ") # # mix with agg functions , not support - tdSql.error("select c1, log(c1 ,2),c5, count(c5) from stb1 ") - tdSql.error("select c1, log(c1 ,2),c5, count(c5) from ct1 ") - tdSql.error("select log(c1 ,2), count(c5) from stb1 ") - tdSql.error("select log(c1 ,2), count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from stb1 ") + tdSql.error(f"select c1, log(c1 ,2),c5, count(c5) from {dbname}.stb1 ") + tdSql.error(f"select c1, log(c1 ,2),c5, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select log(c1 ,2), count(c5) from {dbname}.stb1 ") + tdSql.error(f"select log(c1 ,2), count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ") # agg functions mix with agg functions - tdSql.query("select max(c5), count(c5) from stb1") - tdSql.query("select max(c5), count(c5) from ct1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1") # bug fix for count - tdSql.query("select count(c1) from ct4 ") + tdSql.query(f"select count(c1) from {dbname}.ct4 ") tdSql.checkData(0,0,9) - tdSql.query("select count(*) from ct4 ") + tdSql.query(f"select count(*) from {dbname}.ct4 ") tdSql.checkData(0,0,12) - tdSql.query("select count(c1) from stb1 ") + tdSql.query(f"select count(c1) from {dbname}.stb1 ") tdSql.checkData(0,0,22) - tdSql.query("select count(*) from stb1 ") + tdSql.query(f"select count(*) from {dbname}.stb1 ") tdSql.checkData(0,0,25) # # bug fix for compute - tdSql.query("select c1, log(c1 ,2) -0 ,log(c1-4 ,2)-0 from ct4 ") + tdSql.query(f"select c1, log(c1 ,2) -0 ,log(c1-4 ,2)-0 from {dbname}.ct4 ") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -459,7 +363,7 @@ class TDTestCase: tdSql.checkData(1, 1, 3.000000000) tdSql.checkData(1, 2, 2.000000000) - tdSql.query(" select c1, log(c1 ,2) -0 ,log(c1-0.1 ,2)-0.1 from ct4") + tdSql.query(f"select c1, log(c1 ,2) -0 ,log(c1-0.1 ,2)-0.1 from {dbname}.ct4") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -467,88 +371,87 @@ class TDTestCase: tdSql.checkData(1, 1, 3.000000000) tdSql.checkData(1, 2, 2.881852653) - tdSql.query("select c1, log(c1, -10), c2, log(c2, -10), c3, log(c3, -10) from ct1") + tdSql.query(f"select c1, log(c1, -10), c2, log(c2, -10), c3, log(c3, -10) from {dbname}.ct1") - def test_big_number(self): + def test_big_number(self, dbname="db"): - tdSql.query("select c1, log(c1, 100000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, log(c1, 100000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(0, 1, 0.112886248) tdSql.checkData(1, 1, 0.105637255) tdSql.checkData(4, 1, None) - tdSql.query("select c1, log(c1, 10000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, log(c1, 10000000000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(0, 1, 0.069468461) tdSql.checkData(1, 1, 0.065007542) tdSql.checkData(4, 1, None) - tdSql.query("select c1, log(c1, 10000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, log(c1, 10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, log(c1, 10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, log(c1, 10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(0, 1, 0.036123599) tdSql.checkData(1, 1, 0.033803922) tdSql.checkData(4, 1, None) - tdSql.query("select c1, log(c1, 10000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, log(c1, 10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, log(c1, 10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, log(c1, 10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(0, 1, 0.026561470) tdSql.checkData(1, 1, 0.024855825) tdSql.checkData(4, 1, None) - tdSql.query("select c1, log(c1, 10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, log(c1, 10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, log(c1, 10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, log(c1, 10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(0, 1, 0.022577250) tdSql.checkData(1, 1, 0.021127451) tdSql.checkData(4, 1, None) - tdSql.query("select c1, log(c1, 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, log(c1, 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow - def log_base_test(self): + def log_base_test(self, dbname="db"): # base is an regular number ,int or double - tdSql.query("select c1, log(c1, 2) from ct1") + tdSql.query(f"select c1, log(c1, 2) from {dbname}.ct1") tdSql.checkData(0, 1,3.000000000) - tdSql.query("select c1, log(c1, 2.0) from ct1") + tdSql.query(f"select c1, log(c1, 2.0) from {dbname}.ct1") tdSql.checkData(0, 1, 3.000000000) - tdSql.query("select c1, log(1, 2.0) from ct1") + tdSql.query(f"select c1, log(1, 2.0) from {dbname}.ct1") tdSql.checkData(0, 1, 0.000000000) tdSql.checkRows(13) # # bug for compute in functions - # tdSql.query("select c1, abs(1/0) from ct1") + # tdSql.query(f"select c1, abs(1/0) from {dbname}.ct1") # tdSql.checkData(0, 0, 8) # tdSql.checkData(0, 1, 1) - tdSql.query("select c1, log(1, 2.0) from ct1") + tdSql.query(f"select c1, log(1, 2.0) from {dbname}.ct1") tdSql.checkData(0, 1, 0.000000000) tdSql.checkRows(13) # two cols start log(x,y) - tdSql.query("select c1,c2, log(c1,c2) from ct1") + tdSql.query(f"select c1,c2, log(c1,c2) from {dbname}.ct1") tdSql.checkData(0, 2, 0.182485070) tdSql.checkData(1, 2, 0.172791608) tdSql.checkData(4, 2, None) - tdSql.query("select c1,c2, log(c2,c1) from ct1") + tdSql.query(f"select c1,c2, log(c2,c1) from {dbname}.ct1") tdSql.checkData(0, 2, 5.479900349) tdSql.checkData(1, 2, 5.787318105) tdSql.checkData(4, 2, None) - tdSql.query("select c1, log(2.0 , c1) from ct1") + tdSql.query(f"select c1, log(2.0 , c1) from {dbname}.ct1") tdSql.checkData(0, 1, 0.333333333) tdSql.checkData(1, 1, 0.356207187) tdSql.checkData(4, 1, None) - tdSql.query("select c1, log(2.0 , ceil(abs(c1))) from ct1") + tdSql.query(f"select c1, log(2.0 , ceil(abs(c1))) from {dbname}.ct1") tdSql.checkData(0, 1, 0.333333333) tdSql.checkData(1, 1, 0.356207187) tdSql.checkData(4, 1, None) - def abs_func_filter(self): - tdSql.execute("use db") - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1>5 ") + def abs_func_filter(self, dbname="db"): + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>5 ") tdSql.checkRows(3) tdSql.checkData(0,0,8) tdSql.checkData(0,1,8.000000000) @@ -556,15 +459,7 @@ class TDTestCase: tdSql.checkData(0,3,7.900000000) tdSql.checkData(0,4,3.000000000) - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ") - tdSql.checkRows(1) - tdSql.checkData(0,0,5) - tdSql.checkData(0,1,5.000000000) - tdSql.checkData(0,2,5.000000000) - tdSql.checkData(0,3,4.900000000) - tdSql.checkData(0,4,2.000000000) - - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -572,7 +467,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,2.000000000) - tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1>log(c1,2) limit 1 ") + tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>log(c1,2) limit 1 ") tdSql.checkRows(1) tdSql.checkData(0,0,8) tdSql.checkData(0,1,88888) @@ -581,46 +476,43 @@ class TDTestCase: tdSql.checkData(0,4,7.900000000) tdSql.checkData(0,5,3.000000000) - def log_Arithmetic(self): - pass - def check_boundary_values(self): + def check_boundary_values(self, dbname="bound_test"): - tdSql.execute("drop database if exists bound_test") - tdSql.execute("create database if not exists bound_test") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname}") time.sleep(3) - tdSql.execute("use bound_test") tdSql.execute( - "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" ) - tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( - f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.error( - f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - self.check_result_auto_log( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select log(c1), log(c2) ,log(c3), log(c4), log(c5) ,log(c6) from sub1_bound") - self.check_result_auto_log2( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) ,log(c6,2) from sub1_bound") - self.check_result_auto_log__10( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) ,log(c6,-10) from sub1_bound") + self.check_result_auto_log(None , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) ,log(c6) from {dbname}.sub1_bound") + self.check_result_auto_log( 2 , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) ,log(c6,2) from {dbname}.sub1_bound") + self.check_result_auto_log( 10 , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,10), log(c2,10) ,log(c3,10), log(c4,10), log(c5,10) ,log(c6,10) from {dbname}.sub1_bound") - self.check_result_auto_log2( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select log(c1,2), log(c2,2) ,log(c3,2), log(c3,2), log(c2,2) ,log(c1,2) from sub1_bound") - self.check_result_auto_log( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select log(c1), log(c2) ,log(c3), log(c3), log(c2) ,log(c1) from sub1_bound") + self.check_result_auto_log( 2 , f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c3,2), log(c2,2) ,log(c1,2) from {dbname}.sub1_bound") + self.check_result_auto_log( None , f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c3), log(c2) ,log(c1) from {dbname}.sub1_bound") - self.check_result_auto_log2("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select log(abs(c1) ,2) from sub1_bound" ) + self.check_result_auto_log(2 , f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select log(abs(c1) ,2) from {dbname}.sub1_bound" ) # check basic elem for table per row - tdSql.query("select log(abs(c1),2) ,log(abs(c2),2) , log(abs(c3),2) , log(abs(c4),2), log(abs(c5),2), log(abs(c6),2) from sub1_bound ") + tdSql.query(f"select log(abs(c1),2) ,log(abs(c2),2) , log(abs(c3),2) , log(abs(c4),2), log(abs(c5),2), log(abs(c6),2) from {dbname}.sub1_bound ") tdSql.checkData(0,0,math.log(2147483647,2)) tdSql.checkData(0,1,math.log(9223372036854775807 ,2)) tdSql.checkData(0,2,math.log(32767,2)) @@ -641,7 +533,7 @@ class TDTestCase: tdSql.checkData(3,5,math.log(169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000,2)) # check basic elem for table per row - tdSql.query("select log(abs(c1)) ,log(abs(c2)) , log(abs(c3)) , log(abs(c4)), log(abs(c5)), log(abs(c6)) from sub1_bound ") + tdSql.query(f"select log(abs(c1)) ,log(abs(c2)) , log(abs(c3)) , log(abs(c4)), log(abs(c5)), log(abs(c6)) from {dbname}.sub1_bound ") tdSql.checkData(0,0,math.log(2147483647)) tdSql.checkData(0,1,math.log(9223372036854775807)) tdSql.checkData(0,2,math.log(32767)) @@ -661,28 +553,25 @@ class TDTestCase: tdSql.checkData(3,4,math.log(339999995214436424907732413799364296704.00000)) tdSql.checkData(3,5,math.log(169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000)) - - # check + - * / in functions - tdSql.query("select log(abs(c1+1) ,2) ,log(abs(c2),2) , log(abs(c3*1),2) , log(abs(c4/2),2), log(abs(c5) ,2)/2, log(abs(c6) ,2) from sub1_bound ") + tdSql.query(f"select log(abs(c1+1) ,2) ,log(abs(c2),2) , log(abs(c3*1),2) , log(abs(c4/2),2), log(abs(c5) ,2)/2, log(abs(c6) ,2) from {dbname}.sub1_bound ") tdSql.checkData(0,0,math.log(2147483648.000000000,2)) tdSql.checkData(0,1,math.log(9223372036854775807,2)) tdSql.checkData(0,2,math.log(32767.000000000,2)) tdSql.checkData(0,3,math.log(63.500000000,2)) tdSql.checkData(0,4,63.999401166) - def support_super_table_test(self): - tdSql.execute(" use db ") - self.check_result_auto_log2( " select c5 from stb1 order by ts " , "select log(c5,2) from stb1 order by ts" ) - self.check_result_auto_log2( " select c5 from stb1 order by tbname " , "select log(c5,2) from stb1 order by tbname" ) - self.check_result_auto_log2( " select c5 from stb1 where c1 > 0 order by tbname " , "select log(c5,2) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_log2( " select c5 from stb1 where c1 > 0 order by tbname " , "select log(c5,2) from stb1 where c1 > 0 order by tbname" ) - - self.check_result_auto_log2( " select t1,c5 from stb1 order by ts " , "select log(t1,2), log(c5,2) from stb1 order by ts" ) - self.check_result_auto_log2( " select t1,c5 from stb1 order by tbname " , "select log(t1,2) ,log(c5,2) from stb1 order by tbname" ) - self.check_result_auto_log2( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select log(t1,2) ,log(c5,2) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_log2( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select log(t1,2) , log(c5,2) from stb1 where c1 > 0 order by tbname" ) - pass + def support_super_table_test(self, dbname="db"): + + self.check_result_auto_log( 2 , f"select c5 from {dbname}.stb1 order by ts " , f"select log(c5,2) from {dbname}.stb1 order by ts" ) + self.check_result_auto_log( 2 ,f"select c5 from {dbname}.stb1 order by tbname " , f"select log(c5,2) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_log( 2 ,f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_log( 2 , f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + + self.check_result_auto_log( 2 , f"select t1,c5 from {dbname}.stb1 order by ts " , f"select log(t1,2), log(c5,2) from {dbname}.stb1 order by ts" ) + self.check_result_auto_log( 2 , f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_log( 2 , f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_log( 2 ,f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) , log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() diff --git a/tests/system-test/2-query/lower.py b/tests/system-test/2-query/lower.py index 0917fb63fc638263849625aec5b907c05260f49f..0e33e3834ec9ecc50470f0793b29a3a4b84d4834 100644 --- a/tests/system-test/2-query/lower.py +++ b/tests/system-test/2-query/lower.py @@ -96,16 +96,16 @@ class TDTestCase: return sqls - def __test_current(self): + def __test_current(self, dbname="db"): tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: self.__lower_current_check(tb) tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") - def __test_error(self): + def __test_error(self, dbname="db"): tdLog.printNoPrefix("==========err sql condition check , must return error==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: for errsql in self.__lower_err_check(tb): @@ -113,22 +113,20 @@ class TDTestCase: tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") - def all_test(self): - self.__test_current() - self.__test_error() + def all_test(self, dbname="db"): + self.__test_current(dbname) + self.__test_error(dbname) - - def __create_tb(self): - tdSql.prepare() + def __create_tb(self, dbname="db"): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp - ) tags (t1 int) + ) tags (tag1 int) ''' - create_ntb_sql = f'''create table t1( + create_ntb_sql = f'''create table {dbname}.t1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -138,78 +136,78 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') - def __insert_data(self, rows): + def __insert_data(self, rows, dbname="db"): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into ct1 values - ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } ) - ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } ) + f'''insert into {dbname}.ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) - ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, - { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000} + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} ) ( { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, - { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000} + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} ) ''' ) tdSql.execute( - f'''insert into ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) - ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, - { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000 } + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } ) ( { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, - { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000 } + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } ) ''' ) for i in range(rows): - insert_data = f'''insert into t1 values + insert_data = f'''insert into {dbname}.t1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, - "binary_{i}", "nchar_{i}", { now_time - 1000 * i } ) + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, - "binary_limit-1", "nchar_limit-1", { now_time - 86400000 } + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } ) ( { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, - "binary_limit-2", "nchar_limit-2", { now_time - 172800000 } + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } ) ''' ) @@ -227,10 +225,7 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) - - tdSql.execute("use db") + tdSql.execute("flush database db") tdLog.printNoPrefix("==========step4:after wal, all check again ") self.all_test() diff --git a/tests/system-test/2-query/ltrim.py b/tests/system-test/2-query/ltrim.py index 15f40a09c3db67e4324e75768532221f55f2e35f..330f688990d614c1a824fd25741f19966e227581 100644 --- a/tests/system-test/2-query/ltrim.py +++ b/tests/system-test/2-query/ltrim.py @@ -23,6 +23,7 @@ CHAR_COL = [ BINARY_COL, NCHAR_COL, ] BOOLEAN_COL = [ BOOL_COL, ] TS_TYPE_COL = [ TS_COL, ] +DBNAME = "db" class TDTestCase: @@ -120,16 +121,16 @@ class TDTestCase: return sqls - def __test_current(self): # sourcery skip: use-itertools-product + def __test_current(self, dbname=DBNAME): # sourcery skip: use-itertools-product tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: self.__ltrim_check(tb) tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") - def __test_error(self): + def __test_error(self, dbname=DBNAME): tdLog.printNoPrefix("==========err sql condition check , must return error==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: for errsql in self.__ltrim_err_check(tb): @@ -142,17 +143,16 @@ class TDTestCase: self.__test_error() - def __create_tb(self): - tdSql.prepare() + def __create_tb(self, dbname=DBNAME): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp ) tags (t1 int) ''' - create_ntb_sql = f'''create table t1( + create_ntb_sql = f'''create table {dbname}.t1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -162,29 +162,29 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') - def __insert_data(self, rows): + def __insert_data(self, rows, dbname=DBNAME): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into ct1 values + f'''insert into {dbname}.ct1 values ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -200,7 +200,7 @@ class TDTestCase: ) tdSql.execute( - f'''insert into ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -216,13 +216,13 @@ class TDTestCase: ) for i in range(rows): - insert_data = f'''insert into t1 values + insert_data = f'''insert into {dbname}.t1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -251,8 +251,7 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) + tdSql.execute("flush database db") tdSql.execute("use db") diff --git a/tests/system-test/2-query/mavg.py b/tests/system-test/2-query/mavg.py index d7dc5e6143fe1e34dfc60555c3907beac1434842..b52217af9ac61e5a3c08d55b11e4219ec826b203 100644 --- a/tests/system-test/2-query/mavg.py +++ b/tests/system-test/2-query/mavg.py @@ -25,13 +25,13 @@ from util.cases import * from util.sql import * from util.dnodes import * - +dbname = 'db' class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor()) - def mavg_query_form(self, sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + def mavg_query_form(self, sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr=f"{dbname}.t1", condition=""): ''' mavg function: @@ -50,7 +50,7 @@ class TDTestCase: return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}" - def checkmavg(self,sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + def checkmavg(self,sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr=f"{dbname}.t1", condition=""): # print(self.mavg_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, # table_expr=table_expr, condition=condition)) line = sys._getframe().f_back.f_lineno @@ -62,7 +62,7 @@ class TDTestCase: table_expr=table_expr, condition=condition )) - sql = "select * from t1" + sql = f"select * from {dbname}.t1" collist = tdSql.getColNameList(sql) if not isinstance(col, str): @@ -307,7 +307,7 @@ class TDTestCase: pass - def mavg_current_query(self) : + def mavg_current_query(self, dbname="db") : # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) @@ -325,17 +325,17 @@ class TDTestCase: case6 = {"col": "c9"} self.checkmavg(**case6) - # # case7~8: nested query - # case7 = {"table_expr": "(select c1 from stb1)"} - # self.checkmavg(**case7) - # case8 = {"table_expr": "(select mavg(c1, 1) c1 from stb1 group by tbname)"} + # case7~8: nested query + case7 = {"table_expr": f"(select c1 from {dbname}.stb1)"} + self.checkmavg(**case7) + # case8 = {"table_expr": f"(select _c0, mavg(c1, 1) c1 from {dbname}.stb1 group by tbname)"} # self.checkmavg(**case8) # case9~10: mix with tbname/ts/tag/col - # case9 = {"alias": ", tbname"} - # self.checkmavg(**case9) - # case10 = {"alias": ", _c0"} - # self.checkmavg(**case10) + case9 = {"alias": ", tbname"} + self.checkmavg(**case9) + case10 = {"alias": ", _c0"} + self.checkmavg(**case10) # case11 = {"alias": ", st1"} # self.checkmavg(**case11) # case12 = {"alias": ", c1"} @@ -356,29 +356,29 @@ class TDTestCase: # case17: only support normal table join case17 = { "col": "t1.c1", - "table_expr": "t1, t2", + "table_expr": f"{dbname}.t1 t1, {dbname}.t2 t2", "condition": "where t1.ts=t2.ts" } self.checkmavg(**case17) # # case18~19: with group by # case19 = { - # "table_expr": "stb1", + # "table_expr": f"{dbname}.stb1", # "condition": "partition by tbname" # } # self.checkmavg(**case19) - # case20~21: with order by + # # case20~21: with order by # case20 = {"condition": "order by ts"} # self.checkmavg(**case20) - #case21 = { - # "table_expr": "stb1", - # "condition": "group by tbname order by tbname" - #} - #self.checkmavg(**case21) + case21 = { + "table_expr": f"{dbname}.stb1", + "condition": "group by tbname order by tbname" + } + self.checkmavg(**case21) # # case22: with union # case22 = { - # "condition": "union all select mavg( c1 , 1 ) from t2" + # "condition": f"union all select mavg( c1 , 1 ) from {dbname}.t2" # } # self.checkmavg(**case22) @@ -398,7 +398,7 @@ class TDTestCase: pass - def mavg_error_query(self) -> None : + def mavg_error_query(self, dbname="db") -> None : # unusual test # form test @@ -419,9 +419,9 @@ class TDTestCase: err8 = {"table_expr": ""} self.checkmavg(**err8) # no table_expr - # err9 = {"col": "st1"} + err9 = {"col": "st1"} # self.checkmavg(**err9) # col: tag - # err10 = {"col": 1} + err10 = {"col": 1} # self.checkmavg(**err10) # col: value err11 = {"col": "NULL"} self.checkmavg(**err11) # col: NULL @@ -486,32 +486,33 @@ class TDTestCase: #tdSql.query(" select mavg( c1 , 1 ) + 2 from t1 ") err41 = {"alias": "+ avg(c1)"} self.checkmavg(**err41) # mix with arithmetic 2 - err42 = {"alias": ", c1"} - self.checkmavg(**err42) # mix with other col - # err43 = {"table_expr": "stb1"} + # err42 = {"alias": ", c1"} + # self.checkmavg(**err42) # mix with other col + # err43 = {"table_expr": f"{dbname}.stb1"} # self.checkmavg(**err43) # select stb directly - err44 = { - "col": "stb1.c1", - "table_expr": "stb1, stb2", - "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" - } - self.checkmavg(**err44) # stb join + # err44 = { + # "col": "stb1.c1", + # "table_expr": "stb1, stb2", + # "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + # } + # self.checkmavg(**err44) # stb join + tdSql.query(f"select mavg( stb1.c1 , 1 ) from {dbname}.stb1 stb1, {dbname}.stb2 stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts;") err45 = { "condition": "where ts>0 and ts < now interval(1h) fill(next)" } self.checkmavg(**err45) # interval err46 = { - "table_expr": "t1", + "table_expr": f"{dbname}.t1", "condition": "group by c6" } self.checkmavg(**err46) # group by normal col err47 = { - "table_expr": "stb1", + "table_expr": f"{dbname}.stb1", "condition": "group by tbname slimit 1 " } # self.checkmavg(**err47) # with slimit err48 = { - "table_expr": "stb1", + "table_expr": f"{dbname}.stb1", "condition": "group by tbname slimit 1 soffset 1" } # self.checkmavg(**err48) # with soffset @@ -554,8 +555,8 @@ class TDTestCase: err67 = {"k": 0.999999} self.checkmavg(**err67) # k: left out of [1, 1000] err68 = { - "table_expr": "stb1", - "condition": "group by tbname order by tbname" # order by tbname not supported + "table_expr": f"{dbname}.stb1", + "condition": f"group by tbname order by tbname" # order by tbname not supported } self.checkmavg(**err68) @@ -565,42 +566,42 @@ class TDTestCase: for i in range(tbnum): for j in range(data_row): tdSql.execute( - f"insert into t{i} values (" + f"insert into {dbname}.t{i} values (" f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" ) tdSql.execute( - f"insert into t{i} values (" + f"insert into {dbname}.t{i} values (" f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" ) tdSql.execute( - f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + f"insert into {dbname}.tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" ) pass def mavg_test_table(self,tbnum: int) -> None : - tdSql.execute("drop database if exists db") - tdSql.execute("create database if not exists db keep 3650") - tdSql.execute("use db") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname} keep 3650") + tdSql.execute(f"use {dbname}") tdSql.execute( - "create stable db.stb1 (\ + f"create stable {dbname}.stb1 (\ ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ ) \ tags(st1 int)" ) tdSql.execute( - "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + f"create stable {dbname}.stb2 (ts timestamp, c1 int) tags(st2 int)" ) for i in range(tbnum): - tdSql.execute(f"create table t{i} using stb1 tags({i})") - tdSql.execute(f"create table tt{i} using stb2 tags({i})") + tdSql.execute(f"create table {dbname}.t{i} using {dbname}.stb1 tags({i})") + tdSql.execute(f"create table {dbname}.tt{i} using {dbname}.stb2 tags({i})") pass @@ -617,25 +618,25 @@ class TDTestCase: tdLog.printNoPrefix("######## insert only NULL test:") for i in range(tbnum): - tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") - tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime + 5})") self.mavg_current_query() self.mavg_error_query() tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") # self.mavg_test_table(tbnum) - # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values " # f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") - # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values " # f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") # self.mavg_current_query() # self.mavg_error_query() tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") # self.mavg_test_table(tbnum) - # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values " # f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") - # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values " # f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") # self.mavg_current_query() # self.mavg_error_query() @@ -649,9 +650,9 @@ class TDTestCase: tdLog.printNoPrefix("######## insert data mix with NULL test:") for i in range(tbnum): - tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") - tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") - tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") self.mavg_current_query() self.mavg_error_query() @@ -664,67 +665,64 @@ class TDTestCase: tdDnodes.start(index) self.mavg_current_query() self.mavg_error_query() - tdSql.query("select mavg(1,1) from t1") + tdSql.query(f"select mavg(1,1) from {dbname}.t1") tdSql.checkRows(7) tdSql.checkData(0,0,1.000000000) tdSql.checkData(1,0,1.000000000) tdSql.checkData(5,0,1.000000000) - tdSql.query("select mavg(abs(c1),1) from t1") + tdSql.query(f"select mavg(abs(c1),1) from {dbname}.t1") tdSql.checkRows(4) def mavg_support_stable(self): - tdSql.query(" select mavg(1,3) from stb1 ") + tdSql.query(f" select mavg(1,3) from {dbname}.stb1 ") tdSql.checkRows(68) tdSql.checkData(0,0,1.000000000) - tdSql.query("select mavg(c1,3) from stb1 partition by tbname ") + tdSql.query(f"select mavg(c1,3) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(20) - # tdSql.query("select mavg(st1,3) from stb1 partition by tbname") - # tdSql.checkRows(38) - tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname") + tdSql.query(f"select mavg(st1,3) from {dbname}.stb1 partition by tbname") + tdSql.checkRows(50) + tdSql.query(f"select mavg(st1+c1,3) from {dbname}.stb1 partition by tbname") tdSql.checkRows(20) - tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname") + tdSql.query(f"select mavg(st1+c1,3) from {dbname}.stb1 partition by tbname") tdSql.checkRows(20) - tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname") + tdSql.query(f"select mavg(st1+c1,3) from {dbname}.stb1 partition by tbname") tdSql.checkRows(20) - # # bug need fix - # tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname slimit 1 ") - # tdSql.checkRows(2) - # tdSql.error("select mavg(st1+c1,3) from stb1 partition by tbname limit 1 ") # bug need fix - tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname") + tdSql.query(f"select mavg(st1+c1,3) from {dbname}.stb1 partition by tbname") tdSql.checkRows(20) # bug need fix - # tdSql.query("select tbname , mavg(c1,3) from stb1 partition by tbname") - # tdSql.checkRows(38) - # tdSql.query("select tbname , mavg(st1,3) from stb1 partition by tbname") - # tdSql.checkRows(38) - # tdSql.query("select tbname , mavg(st1,3) from stb1 partition by tbname slimit 1") - # tdSql.checkRows(2) + tdSql.query(f"select tbname , mavg(c1,3) from {dbname}.stb1 partition by tbname") + tdSql.checkRows(20) + tdSql.query(f"select tbname , mavg(st1,3) from {dbname}.stb1 partition by tbname") + tdSql.checkRows(50) + tdSql.query(f"select tbname , mavg(st1,3) from {dbname}.stb1 partition by tbname slimit 1") + tdSql.checkRows(5) # partition by tags - # tdSql.query("select st1 , mavg(c1,3) from stb1 partition by st1") - # tdSql.checkRows(38) - # tdSql.query("select mavg(c1,3) from stb1 partition by st1") - # tdSql.checkRows(38) - # tdSql.query("select st1 , mavg(c1,3) from stb1 partition by st1 slimit 1") - # tdSql.checkRows(2) - # tdSql.query("select mavg(c1,3) from stb1 partition by st1 slimit 1") - # tdSql.checkRows(2) + tdSql.query(f"select st1 , mavg(c1,3) from {dbname}.stb1 partition by st1") + tdSql.checkRows(20) + tdSql.query(f"select mavg(c1,3) from {dbname}.stb1 partition by st1") + tdSql.checkRows(20) + tdSql.query(f"select st1 , mavg(c1,3) from {dbname}.stb1 partition by st1 slimit 1") + tdSql.checkRows(2) + tdSql.query(f"select mavg(c1,3) from {dbname}.stb1 partition by st1 slimit 1") + tdSql.checkRows(2) # partition by col - # tdSql.query("select c1 , mavg(c1,3) from stb1 partition by c1") - # tdSql.checkRows(38) - # tdSql.query("select mavg(c1 ,3) from stb1 partition by c1") - # tdSql.checkRows(38) - # tdSql.query("select c1 , mavg(c1,3) from stb1 partition by st1 slimit 1") - # tdSql.checkRows(2) - # tdSql.query("select diff(c1) from stb1 partition by st1 slimit 1") - # tdSql.checkRows(2) + tdSql.query(f"select c1 , mavg(c1,3) from {dbname}.stb1 partition by c1") + tdSql.checkRows(0) + tdSql.query(f"select c1 , mavg(c1,1) from {dbname}.stb1 partition by c1") + tdSql.checkRows(40) + tdSql.query(f"select c1, c2, c3, c4, mavg(c1,3) from {dbname}.stb1 partition by tbname ") + tdSql.checkRows(20) + tdSql.query(f"select c1, c2, c3, c4, mavg(123,3) from {dbname}.stb1 partition by tbname ") + tdSql.checkRows(50) + def run(self): import traceback diff --git a/tests/system-test/2-query/max.py b/tests/system-test/2-query/max.py index 34442a3725d69092535c02a509ba8cece4c10ed4..1cb08aeee8a0ddfcbdd18159d93b0e14780923e5 100644 --- a/tests/system-test/2-query/max.py +++ b/tests/system-test/2-query/max.py @@ -5,10 +5,7 @@ import numpy as np class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143, - "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } + updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor()) @@ -17,60 +14,57 @@ class TDTestCase: self.ts = 1537146000000 self.binary_str = 'taosdata' self.nchar_str = '涛思数据' - def max_check_stb_and_tb_base(self): + def max_check_stb_and_tb_base(self, dbname="db"): tdSql.prepare() intData = [] floatData = [] - tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, + tdSql.execute(f'''create table {dbname}.stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''') - tdSql.execute("create table stb_1 using stb tags('beijing')") + tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags('beijing')") for i in range(self.rowNum): - tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" + tdSql.execute(f"insert into {dbname}.stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) intData.append(i + 1) floatData.append(i + 0.1) for i in ['ts','col11','col12','col13']: - for j in ['db.stb','stb','db.stb_1','stb_1']: - tdSql.error(f'select max({i} from {j} )') + for j in ['stb','stb_1']: + tdSql.error(f'select max({i} from {dbname}.{j} )') for i in range(1,11): - for j in ['db.stb','stb','db.stb_1','stb_1']: - tdSql.query(f"select max(col{i}) from {j}") + for j in ['stb', 'stb_1']: + tdSql.query(f"select max(col{i}) from {dbname}.{j}") if i<9: tdSql.checkData(0, 0, np.max(intData)) elif i>=9: tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query("select max(col1) from stb_1 where col2<=5") + tdSql.query(f"select max(col1) from {dbname}.stb_1 where col2<=5") tdSql.checkData(0,0,5) - tdSql.query("select max(col1) from stb where col2<=5") + tdSql.query(f"select max(col1) from {dbname}.stb where col2<=5") tdSql.checkData(0,0,5) - tdSql.execute('drop database db') - def max_check_ntb_base(self): + def max_check_ntb_base(self, dbname="db"): tdSql.prepare() intData = [] floatData = [] - tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, + tdSql.execute(f'''create table {dbname}.ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20))''') for i in range(self.rowNum): - tdSql.execute(f"insert into ntb values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" + tdSql.execute(f"insert into {dbname}.ntb values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) intData.append(i + 1) floatData.append(i + 0.1) for i in ['ts','col11','col12','col13']: - for j in ['db.ntb','ntb']: - tdSql.error(f'select max({i} from {j} )') + for j in ['ntb']: + tdSql.error(f'select max({i} from {dbname}.{j} )') for i in range(1,11): - for j in ['db.ntb','ntb']: - tdSql.query(f"select max(col{i}) from {j}") + for j in ['ntb']: + tdSql.query(f"select max(col{i}) from {dbname}.{j}") if i<9: tdSql.checkData(0, 0, np.max(intData)) elif i>=9: tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query("select max(col1) from ntb where col2<=5") + tdSql.query(f"select max(col1) from {dbname}.ntb where col2<=5") tdSql.checkData(0,0,5) - tdSql.execute('drop database db') - def check_max_functions(self, tbname , col_name): @@ -90,55 +84,55 @@ class TDTestCase: tdLog.info(" max function work as expected, sql : %s "% max_sql) - def support_distributed_aggregate(self): + def support_distributed_aggregate(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5") - tdSql.execute(" use testdb ") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f"use {dbname} ") tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) for i in range(1,21): if i ==1 or i == 4: continue else: - tbname = "ct"+f'{i}' + tbname = f"{dbname}.ct{i}" for j in range(9): tdSql.execute( f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -157,7 +151,7 @@ class TDTestCase: tdLog.info(" prepare data for distributed_aggregate done! ") # get vgroup_ids of all - tdSql.query("show vgroups ") + tdSql.query(f"show {dbname}.vgroups ") vgroups = tdSql.queryResult vnode_tables={} @@ -167,7 +161,7 @@ class TDTestCase: # check sub_table of per vnode ,make sure sub_table has been distributed - tdSql.query("select * from information_schema.ins_tables where db_name = 'testdb' and table_name like 'ct%'") + tdSql.query(f"select * from information_schema.ins_tables where db_name = '{dbname}' and table_name like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: @@ -182,13 +176,13 @@ class TDTestCase: # check max function work status - tdSql.query("show tables like 'ct%'") + tdSql.query(f"show {dbname}.tables like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: tablenames.append(table_name[0]) - tdSql.query("desc stb1") + tdSql.query(f"desc {dbname}.stb1") col_names = tdSql.queryResult colnames = [] @@ -198,11 +192,7 @@ class TDTestCase: for tablename in tablenames: for colname in colnames: - self.check_max_functions(tablename,colname) - - # max function with basic filter - print(vnode_tables) - + self.check_max_functions(f"{dbname}.{tablename}", colname) def run(self): diff --git a/tests/system-test/2-query/max_partition.py b/tests/system-test/2-query/max_partition.py index 4b9996d9c3b1d45f52e184f1da4ec8e59714feaa..01c267724210591e639753c3566c4826a5218813 100644 --- a/tests/system-test/2-query/max_partition.py +++ b/tests/system-test/2-query/max_partition.py @@ -12,16 +12,15 @@ class TDTestCase: self.tb_nums = 10 self.ts = 1537146000000 - def prepare_datas(self, stb_name , tb_nums , row_nums ): - tdSql.execute(" use db ") - tdSql.execute(f" create stable {stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\ + def prepare_datas(self, stb_name , tb_nums , row_nums, dbname="db" ): + tdSql.execute(f" create stable {dbname}.{stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\ uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags(t1 timestamp , t2 int , t3 bigint , t4 float , t5 double , t6 smallint , t7 tinyint , t8 bool , t9 binary(36)\ , t10 nchar(36) , t11 int unsigned , t12 bigint unsigned ,t13 smallint unsigned , t14 tinyint unsigned ) ") for i in range(tb_nums): - tbname = f"sub_{stb_name}_{i}" + tbname = f"{dbname}.sub_{stb_name}_{i}" ts = self.ts + i*10000 - tdSql.execute(f"create table {tbname} using {stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )") + tdSql.execute(f"create table {tbname} using {dbname}.{stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )") for row in range(row_nums): ts = self.ts + row*1000 @@ -31,191 +30,192 @@ class TDTestCase: ts = self.ts + row_nums*1000 + null*1000 tdSql.execute(f"insert into {tbname} values({ts} , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL )") - def basic_query(self): - tdSql.query("select count(*) from stb") + def basic_query(self, dbname="db"): + tdSql.query(f"select count(*) from {dbname}.stb") tdSql.checkData(0,0,(self.row_nums + 5 )*self.tb_nums) - tdSql.query("select max(c1) from stb") + tdSql.query(f"select max(c1) from {dbname}.stb") tdSql.checkData(0,0,(self.row_nums -1)) - tdSql.query(" select tbname , max(c1) from stb partition by tbname ") + tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname ") tdSql.checkRows(self.tb_nums) - tdSql.query(" select max(c1) from stb group by t1 order by t1 ") + tdSql.query(f"select max(c1) from {dbname}.stb group by t1 order by t1 ") tdSql.checkRows(self.tb_nums) - tdSql.query(" select max(c1) from stb group by c1 order by t1 ") - tdSql.query(" select max(t2) from stb group by c1 order by t1 ") - tdSql.query(" select max(c1) from stb group by tbname order by tbname ") + tdSql.query(f"select max(c1) from {dbname}.stb group by c1 order by t1 ") + tdSql.query(f"select max(t2) from {dbname}.stb group by c1 order by t1 ") + tdSql.query(f"select max(c1) from {dbname}.stb group by tbname order by tbname ") tdSql.checkRows(self.tb_nums) # bug need fix - tdSql.query(" select max(t2) from stb group by t2 order by t2 ") + tdSql.query(f"select max(t2) from {dbname}.stb group by t2 order by t2 ") tdSql.checkRows(self.tb_nums) - tdSql.query(" select max(c1) from stb group by c1 order by c1 ") + tdSql.query(f"select max(c1) from {dbname}.stb group by c1 order by c1 ") tdSql.checkRows(self.row_nums+1) - tdSql.query(" select c1 , max(c1) from stb group by c1 order by c1 ") + tdSql.query(f"select c1 , max(c1) from {dbname}.stb group by c1 order by c1 ") tdSql.checkRows(self.row_nums+1) # support selective functions - tdSql.query(" select c1 ,c2 ,c3 , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ") + tdSql.query(f"select c1 ,c2 ,c3 , max(c1) ,c4 ,c5 ,t11 from {dbname}.stb group by c1 order by c1 desc ") tdSql.checkRows(self.row_nums+1) - tdSql.query(" select c1, tbname , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ") + tdSql.query(f"select c1, tbname , max(c1) ,c4 ,c5 ,t11 from {dbname}.stb group by c1 order by c1 desc ") tdSql.checkRows(self.row_nums+1) # bug need fix - # tdSql.query(" select tbname , max(c1) from sub_stb_1 where c1 is null group by c1 order by c1 desc ") - # tdSql.checkRows(1) - # tdSql.checkData(0,0,"sub_stb_1") + tdSql.query(f"select tbname , max(c1) from {dbname}.sub_stb_1 where c1 is null group by c1 order by c1 desc ") + tdSql.checkRows(1) + tdSql.checkData(0,0,"sub_stb_1") - tdSql.query("select max(c1) ,c2 ,t2,tbname from stb group by abs(c1) order by abs(c1)") + tdSql.query(f"select max(c1) ,c2 ,t2,tbname from {dbname}.stb group by abs(c1) order by abs(c1)") tdSql.checkRows(self.row_nums+1) - tdSql.query("select abs(c1+c3), count(c1+c3) ,max(c1+t2) from stb group by abs(c1+c3) order by abs(c1+c3)") + tdSql.query(f"select abs(c1+c3), count(c1+c3) ,max(c1+t2) from {dbname}.stb group by abs(c1+c3) order by abs(c1+c3)") tdSql.checkRows(self.row_nums+1) - tdSql.query("select max(c1+c3)+min(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)") + tdSql.query(f"select max(c1+c3)+min(c2) ,abs(c1) from {dbname}.stb group by abs(c1) order by abs(c1)") tdSql.checkRows(self.row_nums+1) - tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from stb group by abs(c1) order by abs(t1)+c2") - tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)+c2") - tdSql.query("select abs(c1+c3)+abs(c2) , count(c1+c3)+max(c2) from stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)") + tdSql.error(f"select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from {dbname}.stb group by abs(c1) order by abs(t1)+c2") + tdSql.error(f"select count(c1+c3)+max(c2) ,abs(c1) from {dbname}.stb group by abs(c1) order by abs(c1)+c2") + tdSql.query(f"select abs(c1+c3)+abs(c2) , count(c1+c3)+max(c2) from {dbname}.stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)") tdSql.checkRows(self.row_nums+1) - tdSql.query(" select max(c1) , max(t2) from stb where abs(c1+t2)=1 partition by tbname ") + tdSql.query(f"select max(c1) , max(t2) from {dbname}.stb where abs(c1+t2)=1 partition by tbname ") tdSql.checkRows(2) - tdSql.query(" select max(c1) from stb where abs(c1+t2)=1 partition by tbname ") + tdSql.query(f"select max(c1) from {dbname}.stb where abs(c1+t2)=1 partition by tbname ") tdSql.checkRows(2) - tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname ") + tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname order by tbname ") tdSql.checkRows(self.tb_nums) tdSql.checkData(0,1,self.row_nums-1) - tdSql.query("select tbname , max(c2) from stb partition by t1 order by t1") - tdSql.query("select tbname , max(t2) from stb partition by t1 order by t1") - tdSql.query("select tbname , max(t2) from stb partition by t2 order by t2") + tdSql.query(f"select tbname , max(c2) from {dbname}.stb partition by t1 order by t1") + tdSql.query(f"select tbname , max(t2) from {dbname}.stb partition by t1 order by t1") + tdSql.query(f"select tbname , max(t2) from {dbname}.stb partition by t2 order by t2") # # bug need fix - tdSql.query("select t2 , max(t2) from stb partition by t2 order by t2") + tdSql.query(f"select t2 , max(t2) from {dbname}.stb partition by t2 order by t2") tdSql.checkRows(self.tb_nums) - tdSql.query("select tbname , max(c1) from stb partition by tbname order by tbname") + tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname order by tbname") tdSql.checkRows(self.tb_nums) tdSql.checkData(0,1,self.row_nums-1) - tdSql.query("select tbname , max(c1) from stb partition by t2 order by t2") + tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by t2 order by t2") - tdSql.query("select c2, max(c1) from stb partition by c2 order by c2 desc") + tdSql.query(f"select c2, max(c1) from {dbname}.stb partition by c2 order by c2 desc") tdSql.checkRows(self.tb_nums+1) tdSql.checkData(0,1,self.row_nums-1) - tdSql.query("select tbname , max(c1) from stb partition by c1 order by c2") + tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by c1 order by c2") - tdSql.query("select tbname , abs(t2) from stb partition by c2 order by t2") + tdSql.query(f"select tbname , abs(t2) from {dbname}.stb partition by c2 order by t2") tdSql.checkRows(self.tb_nums*(self.row_nums+5)) - tdSql.query("select max(c1) , count(t2) from stb partition by c2 ") + tdSql.query(f"select max(c1) , count(t2) from {dbname}.stb partition by c2 ") tdSql.checkRows(self.row_nums+1) tdSql.checkData(0,1,self.row_nums) - tdSql.query("select count(c1) , max(t2) ,c2 from stb partition by c2 order by c2") + tdSql.query(f"select count(c1) , max(t2) ,c2 from {dbname}.stb partition by c2 order by c2") tdSql.checkRows(self.row_nums+1) - tdSql.query("select count(c1) , count(t1) ,max(c2) ,tbname from stb partition by tbname order by tbname") + tdSql.query(f"select count(c1) , count(t1) ,max(c2) ,tbname from {dbname}.stb partition by tbname order by tbname") tdSql.checkRows(self.tb_nums) tdSql.checkCols(4) - tdSql.query("select count(c1) , max(t2) ,t1 from stb partition by t1 order by t1") + tdSql.query(f"select count(c1) , max(t2) ,t1 from {dbname}.stb partition by t1 order by t1") tdSql.checkRows(self.tb_nums) tdSql.checkData(0,0,self.row_nums) # bug need fix - tdSql.query("select count(c1) , max(t2) ,abs(c1) from stb partition by abs(c1) order by abs(c1)") + tdSql.query(f"select count(c1) , max(t2) ,abs(c1) from {dbname}.stb partition by abs(c1) order by abs(c1)") tdSql.checkRows(self.row_nums+1) - tdSql.query("select max(ceil(c2)) , max(floor(t2)) ,max(floor(c2)) from stb partition by abs(c2) order by abs(c2)") + tdSql.query(f"select max(ceil(c2)) , max(floor(t2)) ,max(floor(c2)) from {dbname}.stb partition by abs(c2) order by abs(c2)") tdSql.checkRows(self.row_nums+1) - tdSql.query("select max(ceil(c1-2)) , max(floor(t2+1)) ,max(c2-c1) from stb partition by abs(floor(c1)) order by abs(floor(c1))") + tdSql.query(f"select max(ceil(c1-2)) , max(floor(t2+1)) ,max(c2-c1) from {dbname}.stb partition by abs(floor(c1)) order by abs(floor(c1))") tdSql.checkRows(self.row_nums+1) - tdSql.query("select tbname , max(c1) ,c1 from stb partition by tbname order by tbname") + tdSql.query(f"select tbname , max(c1) ,c1 from {dbname}.stb partition by tbname order by tbname") tdSql.checkRows(self.tb_nums) tdSql.checkData(0,0,'sub_stb_0') tdSql.checkData(0,1,9) tdSql.checkData(0,2,9) - tdSql.query("select tbname ,top(c1,1) ,c1 from stb partition by tbname order by tbname") + tdSql.query(f"select tbname ,top(c1,1) ,c1 from {dbname}.stb partition by tbname order by tbname") tdSql.checkRows(self.tb_nums) - tdSql.query(" select c1 , sample(c1,2) from stb partition by tbname order by tbname ") + tdSql.query(f"select c1 , sample(c1,2) from {dbname}.stb partition by tbname order by tbname ") tdSql.checkRows(self.tb_nums*2) # interval - tdSql.query("select max(c1) from stb interval(2s) sliding(1s)") + tdSql.query(f"select max(c1) from {dbname}.stb interval(2s) sliding(1s)") # bug need fix - tdSql.query('select max(c1) from stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)') + tdSql.query(f'select max(c1) from {dbname}.stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)') - tdSql.query(" select tbname , count(c1) from stb partition by tbname interval(10s) slimit 5 soffset 1 ") + tdSql.query(f"select tbname , count(c1) from {dbname}.stb partition by tbname interval(10s) slimit 5 soffset 1 ") - tdSql.query("select tbname , max(c1) from stb partition by tbname interval(10s)") + tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname interval(10s)") tdSql.checkRows(self.row_nums*2) - tdSql.query("select unique(c1) from stb partition by tbname order by tbname") + tdSql.query(f"select unique(c1) from {dbname}.stb partition by tbname order by tbname") - tdSql.query("select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s)") + tdSql.query(f"select tbname , count(c1) from {dbname}.sub_stb_1 partition by tbname interval(10s)") tdSql.checkData(0,0,'sub_stb_1') tdSql.checkData(0,1,self.row_nums) - tdSql.query("select c1 , mavg(c1 ,2 ) from stb partition by c1") + tdSql.query(f"select c1 , mavg(c1 ,2 ) from {dbname}.stb partition by c1") tdSql.checkRows(90) - tdSql.query("select c1 , diff(c1 , 0) from stb partition by c1") + tdSql.query(f"select c1 , diff(c1 , 0) from {dbname}.stb partition by c1") tdSql.checkRows(90) - tdSql.query("select c1 , csum(c1) from stb partition by c1") + tdSql.query(f"select c1 , csum(c1) from {dbname}.stb partition by c1") tdSql.checkRows(100) - tdSql.query("select c1 , sample(c1,2) from stb partition by c1 order by c1") + tdSql.query(f"select c1 , sample(c1,2) from {dbname}.stb partition by c1 order by c1") tdSql.checkRows(21) # bug need fix - # tdSql.checkData(0,1,None) + tdSql.checkData(0,1,None) - tdSql.query("select c1 , twa(c1) from stb partition by c1 order by c1") + tdSql.query(f"select c1 , twa(c1) from {dbname}.stb partition by c1 order by c1") tdSql.checkRows(11) tdSql.checkData(0,1,None) - tdSql.query("select c1 , irate(c1) from stb partition by c1 order by c1") + tdSql.query(f"select c1 , irate(c1) from {dbname}.stb partition by c1 order by c1") tdSql.checkRows(11) tdSql.checkData(0,1,None) - tdSql.query("select c1 , DERIVATIVE(c1,2,1) from stb partition by c1 order by c1") + tdSql.query(f"select c1 , DERIVATIVE(c1,2,1) from {dbname}.stb partition by c1 order by c1") tdSql.checkRows(90) # bug need fix tdSql.checkData(0,1,None) - tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ") + tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname order by tbname slimit 5 soffset 0 ") tdSql.checkRows(10) - tdSql.query(" select tbname , max(c1) from sub_stb_1 partition by tbname interval(10s) sliding(5s) ") + tdSql.query(f"select tbname , max(c1) from {dbname}.sub_stb_1 partition by tbname interval(10s) sliding(5s) ") - tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)') - tdSql.query(f'select tbname , max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)') + tdSql.query(f'select max(c1) from {dbname}.stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)') + tdSql.query(f'select tbname , max(c1) from {dbname}.stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)') def run(self): + dbname = "db" tdSql.prepare() self.prepare_datas("stb",self.tb_nums,self.row_nums) self.basic_query() # # coverage case for taosd crash about bug fix - tdSql.query(" select sum(c1) from stb where t2+10 >1 ") - tdSql.query(" select count(c1),count(t1) from stb where -t2<1 ") - tdSql.query(" select tbname ,max(ceil(c1)) from stb group by tbname ") - tdSql.query(" select avg(abs(c1)) , tbname from stb group by tbname ") - tdSql.query(" select t1,c1 from stb where abs(t2+c1)=1 ") + tdSql.query(f"select sum(c1) from {dbname}.stb where t2+10 >1 ") + tdSql.query(f"select count(c1),count(t1) from {dbname}.stb where -t2<1 ") + tdSql.query(f"select tbname ,max(ceil(c1)) from {dbname}.stb group by tbname ") + tdSql.query(f"select avg(abs(c1)) , tbname from {dbname}.stb group by tbname ") + tdSql.query(f"select t1,c1 from {dbname}.stb where abs(t2+c1)=1 ") def stop(self): diff --git a/tests/system-test/2-query/min.py b/tests/system-test/2-query/min.py index c27e9926ff52e178afe230872d70c6ab269d6983..c47fa49237e5305f80a411e1748bacbe82e8f835 100644 --- a/tests/system-test/2-query/min.py +++ b/tests/system-test/2-query/min.py @@ -14,196 +14,124 @@ class TDTestCase: self.ts = 1537146000000 def run(self): + dbname = "db" tdSql.prepare() intData = [] floatData = [] - tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + tdSql.execute(f'''create table {dbname}.stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') - tdSql.execute("create table stb_1 using stb tags('beijing')") - tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags('beijing')") + tdSql.execute(f'''create table {dbname}.ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') for i in range(self.rowNum): - tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + tdSql.execute(f"insert into {dbname}.ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) intData.append(i + 1) floatData.append(i + 0.1) for i in range(self.rowNum): - tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + tdSql.execute(f"insert into {dbname}.stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) intData.append(i + 1) floatData.append(i + 0.1) # max verifacation - tdSql.error("select min(ts) from stb_1") - tdSql.error("select min(ts) from db.stb_1") - tdSql.error("select min(col7) from stb_1") - tdSql.error("select min(col7) from db.stb_1") - tdSql.error("select min(col8) from stb_1") - tdSql.error("select min(col8) from db.stb_1") - tdSql.error("select min(col9) from stb_1") - tdSql.error("select min(col9) from db.stb_1") - # tdSql.error("select min(a) from stb_1") - # tdSql.error("select min(1) from stb_1") - tdSql.error("select min(now()) from stb_1") - tdSql.error("select min(count(c1),count(c2)) from stb_1") + tdSql.error(f"select min(ts) from {dbname}.stb_1") + tdSql.error(f"select min(col7) from {dbname}.stb_1") + tdSql.error(f"select min(col8) from {dbname}.stb_1") + tdSql.error(f"select min(col9) from {dbname}.stb_1") + tdSql.error(f"select min(a) from {dbname}.stb_1") + tdSql.query(f"select min(1) from {dbname}.stb_1") + tdSql.error(f"select min(now()) from {dbname}.stb_1") + tdSql.error(f"select min(count(c1),count(c2)) from {dbname}.stb_1") - tdSql.query("select min(col1) from stb_1") + tdSql.query(f"select min(col1) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col1) from db.stb_1") + tdSql.query(f"select min(col2) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col2) from stb_1") + tdSql.query(f"select min(col3) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col2) from db.stb_1") + tdSql.query(f"select min(col4) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col3) from stb_1") + tdSql.query(f"select min(col11) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col3) from db.stb_1") + tdSql.query(f"select min(col12) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col4) from stb_1") + tdSql.query(f"select min(col13) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col4) from db.stb_1") + tdSql.query(f"select min(col14) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col11) from stb_1") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col11) from db.stb_1") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col12) from stb_1") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col12) from db.stb_1") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col13) from stb_1") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col13) from db.stb_1") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col14) from stb_1") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col14) from db.stb_1") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col5) from stb_1") + tdSql.query(f"select min(col5) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col5) from db.stb_1") + tdSql.query(f"select min(col6) from {dbname}.stb_1") tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col6) from stb_1") - tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col6) from db.stb_1") - tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col1) from stb_1 where col2>=5") + tdSql.query(f"select min(col1) from {dbname}.stb_1 where col2>=5") tdSql.checkData(0,0,5) - tdSql.error("select min(ts) from stb_1") - tdSql.error("select min(ts) from db.stb_1") - tdSql.error("select min(col7) from stb_1") - tdSql.error("select min(col7) from db.stb_1") - tdSql.error("select min(col8) from stb_1") - tdSql.error("select min(col8) from db.stb_1") - tdSql.error("select min(col9) from stb_1") - tdSql.error("select min(col9) from db.stb_1") - # tdSql.error("select min(a) from stb_1") - # tdSql.error("select min(1) from stb_1") - tdSql.error("select min(now()) from stb_1") - tdSql.error("select min(count(c1),count(c2)) from stb_1") + tdSql.error(f"select min(ts) from {dbname}.stb_1") + tdSql.error(f"select min(col7) from {dbname}.stb_1") + tdSql.error(f"select min(col8) from {dbname}.stb_1") + tdSql.error(f"select min(col9) from {dbname}.stb_1") + tdSql.error(f"select min(a) from {dbname}.stb_1") + tdSql.query(f"select min(1) from {dbname}.stb_1") + tdSql.error(f"select min(now()) from {dbname}.stb_1") + tdSql.error(f"select min(count(c1),count(c2)) from {dbname}.stb_1") - tdSql.query("select min(col1) from stb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col1) from db.stb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col2) from stb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col2) from db.stb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col3) from stb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col3) from db.stb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col4) from stb") + tdSql.query(f"select min(col1) from {dbname}.stb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col4) from db.stb") + tdSql.query(f"select min(col2) from {dbname}.stb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col11) from stb") + tdSql.query(f"select min(col3) from {dbname}.stb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col11) from db.stb") + tdSql.query(f"select min(col4) from {dbname}.stb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col12) from stb") + tdSql.query(f"select min(col11) from {dbname}.stb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col12) from db.stb") + tdSql.query(f"select min(col12) from {dbname}.stb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col13) from stb") + tdSql.query(f"select min(col13) from {dbname}.stb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col13) from db.stb") + tdSql.query(f"select min(col14) from {dbname}.stb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col14) from stb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col14) from db.stb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col5) from stb") - tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col5) from db.stb") - tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col6) from stb") + tdSql.query(f"select min(col5) from {dbname}.stb") tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col6) from db.stb") + tdSql.query(f"select min(col6) from {dbname}.stb") tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col1) from stb where col2>=5") + tdSql.query(f"select min(col1) from {dbname}.stb where col2>=5") tdSql.checkData(0,0,5) + tdSql.error(f"select min(ts) from {dbname}.ntb") + tdSql.error(f"select min(col7) from {dbname}.ntb") + tdSql.error(f"select min(col8) from {dbname}.ntb") + tdSql.error(f"select min(col9) from {dbname}.ntb") + tdSql.error(f"select min(a) from {dbname}.ntb") + tdSql.query(f"select min(1) from {dbname}.ntb") + tdSql.error(f"select min(now()) from {dbname}.ntb") + tdSql.error(f"select min(count(c1),count(c2)) from {dbname}.ntb") - tdSql.error("select min(ts) from ntb") - tdSql.error("select min(ts) from db.ntb") - tdSql.error("select min(col7) from ntb") - tdSql.error("select min(col7) from db.ntb") - tdSql.error("select min(col8) from ntb") - tdSql.error("select min(col8) from db.ntb") - tdSql.error("select min(col9) from ntb") - tdSql.error("select min(col9) from db.ntb") - # tdSql.error("select min(a) from stb_1") - # tdSql.error("select min(1) from stb_1") - tdSql.error("select min(now()) from ntb") - tdSql.error("select min(count(c1),count(c2)) from ntb") - - tdSql.query("select min(col1) from ntb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col1) from db.ntb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col2) from ntb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col2) from db.ntb") + tdSql.query(f"select min(col1) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col3) from ntb") + tdSql.query(f"select min(col2) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col3) from db.ntb") + tdSql.query(f"select min(col3) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col4) from ntb") + tdSql.query(f"select min(col4) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col4) from db.ntb") + tdSql.query(f"select min(col11) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col11) from ntb") + tdSql.query(f"select min(col12) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col11) from db.ntb") + tdSql.query(f"select min(col13) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col12) from ntb") + tdSql.query(f"select min(col14) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col12) from db.ntb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col13) from ntb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col13) from db.ntb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col14) from ntb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col14) from db.ntb") - tdSql.checkData(0, 0, np.min(intData)) - tdSql.query("select min(col5) from ntb") - tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col5) from db.ntb") - tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col6) from ntb") + tdSql.query(f"select min(col5) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col6) from db.ntb") + tdSql.query(f"select min(col6) from {dbname}.ntb") tdSql.checkData(0, 0, np.min(floatData)) - tdSql.query("select min(col1) from ntb where col2>=5") + tdSql.query(f"select min(col1) from {dbname}.ntb where col2>=5") tdSql.checkData(0,0,5) diff --git a/tests/system-test/2-query/nestedQuery_str.py b/tests/system-test/2-query/nestedQuery_str.py index 0d40ef8147eabe133973a15607c340243b69db92..931ff873dcce279d8ddff018549beb648c5cfbc4 100755 --- a/tests/system-test/2-query/nestedQuery_str.py +++ b/tests/system-test/2-query/nestedQuery_str.py @@ -24,9 +24,6 @@ from util.dnodes import tdDnodes from util.dnodes import * class TDTestCase: - updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) diff --git a/tests/system-test/2-query/pow.py b/tests/system-test/2-query/pow.py index 1af8bd3839beafe37f690abf14d85f3c0e224cb2..0702d05c0b7bf0989046ab1cfdfaa0d812c78407 100644 --- a/tests/system-test/2-query/pow.py +++ b/tests/system-test/2-query/pow.py @@ -9,48 +9,46 @@ from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} + def init(self, conn, powSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - - def prepare_datas(self): + + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) - + tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -65,257 +63,182 @@ class TDTestCase: ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ''' ) - - def check_result_auto_pow2(self ,origin_query , pow_query): + def check_result_auto_pow(self ,base , origin_query , pow_query): pow_result = tdSql.getResult(pow_query) origin_result = tdSql.getResult(origin_query) auto_result =[] - - for row in origin_result: - row_check = [] - for elem in row: - if elem == None: - elem = None - else: - elem = math.pow(elem,2) - row_check.append(elem) - auto_result.append(row_check) - - check_status = True - for row_index , row in enumerate(pow_result): - for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): - check_status = False - elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - check_status = False - else: - pass - if not check_status: - tdLog.notice("pow function value has not as expected , sql is \"%s\" "%pow_query ) - sys.exit(1) - else: - tdLog.info("pow value check pass , it work as expected ,sql is \"%s\" "%pow_query ) - - def check_result_auto_pow1(self ,origin_query , pow_query): - pow_result = tdSql.getResult(pow_query) - origin_result = tdSql.getResult(origin_query) - - auto_result =[] - for row in origin_result: row_check = [] for elem in row: if elem == None: elem = None else : - elem = pow(elem ,1) + elem = float(pow(elem ,base)) row_check.append(elem) auto_result.append(row_check) - check_status = True + tdSql.query(pow_query) for row_index , row in enumerate(pow_result): for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): - check_status = False - elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - check_status = False - else: - pass - if not check_status: - tdLog.notice("pow function value has not as expected , sql is \"%s\" "%pow_query ) - sys.exit(1) - else: - tdLog.info("pow value check pass , it work as expected ,sql is \"%s\" "%pow_query ) - - def check_result_auto_pow__10(self ,origin_query , pow_query): - pow_result = tdSql.getResult(pow_query) - origin_result = tdSql.getResult(origin_query) + tdSql.checkData(row_index,col_index ,auto_result[row_index][col_index]) + - auto_result =[] - - for row in origin_result: - row_check = [] - for elem in row: - if elem == None: - elem = None - elif elem == 0: - elem = None - else: - elem = pow(elem ,-10) - row_check.append(elem) - auto_result.append(row_check) - - check_status = True - for row_index , row in enumerate(pow_result): - for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): - check_status = False - elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - check_status = False - else: - pass - if not check_status: - tdLog.notice("pow function value has not as expected , sql is \"%s\" "%pow_query ) - sys.exit(1) - else: - tdLog.info("pow value check pass , it work as expected ,sql is \"%s\" "%pow_query ) - - def test_errors(self): + def test_errors(self, dbname="db"): error_sql_lists = [ - "select pow from t1", - # "select pow(-+--+c1 ,2) from t1", - # "select +-pow(c1,2) from t1", - # "select ++-pow(c1,2) from t1", - # "select ++--pow(c1,2) from t1", - # "select - -pow(c1,2)*0 from t1", - # "select pow(tbname+1,2) from t1 ", - "select pow(123--123,2)==1 from t1", - "select pow(c1,2) as 'd1' from t1", - "select pow(c1 ,c2 ,2) from t1", - "select pow(c1 ,NULL ,2) from t1", - "select pow(, 2) from t1;", - "select pow(pow(c1, 2) ab from t1)", - "select pow(c1 ,2 ) as int from t1", - "select pow from stb1", - # "select pow(-+--+c1) from stb1", - # "select +-pow(c1) from stb1", - # "select ++-pow(c1) from stb1", - # "select ++--pow(c1) from stb1", - # "select - -pow(c1)*0 from stb1", - # "select pow(tbname+1) from stb1 ", - "select pow(123--123 ,2)==1 from stb1", - "select pow(c1 ,2) as 'd1' from stb1", - "select pow(c1 ,c2 ,2 ) from stb1", - "select pow(c1 ,NULL,2) from stb1", - "select pow(,) from stb1;", - "select pow(pow(c1 , 2) ab from stb1)", - "select pow(c1 , 2) as int from stb1" + f"select pow from {dbname}.t1", + # f"select pow(-+--+c1 ,2) from {dbname}.t1", + # f"select +-pow(c1,2) from {dbname}.t1", + # f"select ++-pow(c1,2) from {dbname}.t1", + # f"select ++--pow(c1,2) from {dbname}.t1", + # f"select - -pow(c1,2)*0 from {dbname}.t1", + # f"select pow(tbname+1,2) from {dbname}.t1 ", + f"select pow(123--123,2)==1 from {dbname}.t1", + f"select pow(c1,2) as 'd1' from {dbname}.t1", + f"select pow(c1 ,c2 ,2) from {dbname}.t1", + f"select pow(c1 ,NULL ,2) from {dbname}.t1", + f"select pow(, 2) from {dbname}.t1;", + f"select pow(pow(c1, 2) ab from {dbname}.t1)", + f"select pow(c1 ,2 ) as int from {dbname}.t1", + f"select pow from {dbname}.stb1", + # f"select pow(-+--+c1) from {dbname}.stb1", + # f"select +-pow(c1) from {dbname}.stb1", + # f"select ++-pow(c1) from {dbname}.stb1", + # f"select ++--pow(c1) from {dbname}.stb1", + # f"select - -pow(c1)*0 from {dbname}.stb1", + # f"select pow(tbname+1) from {dbname}.stb1 ", + f"select pow(123--123 ,2)==1 from {dbname}.stb1", + f"select pow(c1 ,2) as 'd1' from {dbname}.stb1", + f"select pow(c1 ,c2 ,2 ) from {dbname}.stb1", + f"select pow(c1 ,NULL,2) from {dbname}.stb1", + f"select pow(,) from {dbname}.stb1;", + f"select pow(pow(c1 , 2) ab from {dbname}.stb1)", + f"select pow(c1 , 2) as int from {dbname}.stb1" ] for error_sql in error_sql_lists: tdSql.error(error_sql) - - def support_types(self): + + def support_types(self, dbname="db"): type_error_sql_lists = [ - "select pow(ts ,2 ) from t1" , - "select pow(c7,c1 ) from t1", - "select pow(c8,c2) from t1", - "select pow(c9,c3 ) from t1", - "select pow(ts,c4 ) from ct1" , - "select pow(c7,c5 ) from ct1", - "select pow(c8,c6 ) from ct1", - "select pow(c9,c8 ) from ct1", - "select pow(ts,2 ) from ct3" , - "select pow(c7,2 ) from ct3", - "select pow(c8,2 ) from ct3", - "select pow(c9,2 ) from ct3", - "select pow(ts,2 ) from ct4" , - "select pow(c7,2 ) from ct4", - "select pow(c8,2 ) from ct4", - "select pow(c9,2 ) from ct4", - "select pow(ts,2 ) from stb1" , - "select pow(c7,2 ) from stb1", - "select pow(c8,2 ) from stb1", - "select pow(c9,2 ) from stb1" , - - "select pow(ts,2 ) from stbbb1" , - "select pow(c7,2 ) from stbbb1", - - "select pow(ts,2 ) from tbname", - "select pow(c9,2 ) from tbname" + f"select pow(ts ,2 ) from {dbname}.t1" , + f"select pow(c7,c1 ) from {dbname}.t1", + f"select pow(c8,c2) from {dbname}.t1", + f"select pow(c9,c3 ) from {dbname}.t1", + f"select pow(ts,c4 ) from {dbname}.ct1" , + f"select pow(c7,c5 ) from {dbname}.ct1", + f"select pow(c8,c6 ) from {dbname}.ct1", + f"select pow(c9,c8 ) from {dbname}.ct1", + f"select pow(ts,2 ) from {dbname}.ct3" , + f"select pow(c7,2 ) from {dbname}.ct3", + f"select pow(c8,2 ) from {dbname}.ct3", + f"select pow(c9,2 ) from {dbname}.ct3", + f"select pow(ts,2 ) from {dbname}.ct4" , + f"select pow(c7,2 ) from {dbname}.ct4", + f"select pow(c8,2 ) from {dbname}.ct4", + f"select pow(c9,2 ) from {dbname}.ct4", + f"select pow(ts,2 ) from {dbname}.stb1" , + f"select pow(c7,2 ) from {dbname}.stb1", + f"select pow(c8,2 ) from {dbname}.stb1", + f"select pow(c9,2 ) from {dbname}.stb1" , + + f"select pow(ts,2 ) from {dbname}.stbbb1" , + f"select pow(c7,2 ) from {dbname}.stbbb1", + + f"select pow(ts,2 ) from {dbname}.tbname", + f"select pow(c9,2 ) from {dbname}.tbname" ] - + for type_sql in type_error_sql_lists: tdSql.error(type_sql) - - + + type_sql_lists = [ - "select pow(c1,2 ) from t1", - "select pow(c2,2 ) from t1", - "select pow(c3,2 ) from t1", - "select pow(c4,2 ) from t1", - "select pow(c5,2 ) from t1", - "select pow(c6,2 ) from t1", - - "select pow(c1,2 ) from ct1", - "select pow(c2,2 ) from ct1", - "select pow(c3,2 ) from ct1", - "select pow(c4,2 ) from ct1", - "select pow(c5,2 ) from ct1", - "select pow(c6,2 ) from ct1", - - "select pow(c1,2 ) from ct3", - "select pow(c2,2 ) from ct3", - "select pow(c3,2 ) from ct3", - "select pow(c4,2 ) from ct3", - "select pow(c5,2 ) from ct3", - "select pow(c6,2 ) from ct3", - - "select pow(c1,2 ) from stb1", - "select pow(c2,2 ) from stb1", - "select pow(c3,2 ) from stb1", - "select pow(c4,2 ) from stb1", - "select pow(c5,2 ) from stb1", - "select pow(c6,2 ) from stb1", - - "select pow(c6,2) as alisb from stb1", - "select pow(c6,2) alisb from stb1", + f"select pow(c1,2 ) from {dbname}.t1", + f"select pow(c2,2 ) from {dbname}.t1", + f"select pow(c3,2 ) from {dbname}.t1", + f"select pow(c4,2 ) from {dbname}.t1", + f"select pow(c5,2 ) from {dbname}.t1", + f"select pow(c6,2 ) from {dbname}.t1", + + f"select pow(c1,2 ) from {dbname}.ct1", + f"select pow(c2,2 ) from {dbname}.ct1", + f"select pow(c3,2 ) from {dbname}.ct1", + f"select pow(c4,2 ) from {dbname}.ct1", + f"select pow(c5,2 ) from {dbname}.ct1", + f"select pow(c6,2 ) from {dbname}.ct1", + + f"select pow(c1,2 ) from {dbname}.ct3", + f"select pow(c2,2 ) from {dbname}.ct3", + f"select pow(c3,2 ) from {dbname}.ct3", + f"select pow(c4,2 ) from {dbname}.ct3", + f"select pow(c5,2 ) from {dbname}.ct3", + f"select pow(c6,2 ) from {dbname}.ct3", + + f"select pow(c1,2 ) from {dbname}.stb1", + f"select pow(c2,2 ) from {dbname}.stb1", + f"select pow(c3,2 ) from {dbname}.stb1", + f"select pow(c4,2 ) from {dbname}.stb1", + f"select pow(c5,2 ) from {dbname}.stb1", + f"select pow(c6,2 ) from {dbname}.stb1", + + f"select pow(c6,2) as alisb from {dbname}.stb1", + f"select pow(c6,2) alisb from {dbname}.stb1", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - - def basic_pow_function(self): - # basic query - tdSql.query("select c1 from ct3") + def basic_pow_function(self, dbname="db"): + + # basic query + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select pow(c1 ,2) from ct3") + tdSql.query(f"select pow(c1 ,2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select pow(c2 ,2) from ct3") + tdSql.query(f"select pow(c2 ,2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select pow(c3 ,2) from ct3") + tdSql.query(f"select pow(c3 ,2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select pow(c4 ,2) from ct3") + tdSql.query(f"select pow(c4 ,2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select pow(c5 ,2) from ct3") + tdSql.query(f"select pow(c5 ,2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select pow(c6 ,2) from ct3") + tdSql.query(f"select pow(c6 ,2) from {dbname}.ct3") tdSql.checkRows(0) # # used for regular table - tdSql.query("select pow(c1 ,2) from t1") + tdSql.query(f"select pow(c1 ,2) from {dbname}.t1") tdSql.checkData(0, 0, None) tdSql.checkData(1 , 0, 1.000000000) tdSql.checkData(3 , 0, 9.000000000) tdSql.checkData(5 , 0, None) - tdSql.query("select c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 4, 1.11000) tdSql.checkData(3, 3, 33) tdSql.checkData(5, 4, None) - tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 5, 1.11000) tdSql.checkData(3, 4, 33) tdSql.checkData(5, 5, None) - self.check_result_auto_pow2( "select c1, c2, c3 , c4, c5 from t1", "select pow(c1 ,2), pow(c2 ,2) ,pow(c3, 2), pow(c4 ,2), pow(c5 ,2) from t1") - self.check_result_auto_pow1( "select c1, c2, c3 , c4, c5 from t1", "select pow(c1 ,1), pow(c2 ,1) ,pow(c3, 1), pow(c4 ,1), pow(c5 ,1) from t1") - self.check_result_auto_pow__10( "select c1, c2, c3 , c4, c5 from t1", "select pow(c1 ,-10), pow(c2 ,-10) ,pow(c3, -10), pow(c4 ,-10), pow(c5 ,-10) from t1") - + self.check_result_auto_pow( 2, f"select c1, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,2) , pow(c3, 2), pow(c4 ,2), pow(c5 ,2) from {dbname}.t1") + self.check_result_auto_pow( 1,f"select c1, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,1) , pow(c3, 1), pow(c4 ,1), pow(c5 ,1) from {dbname}.t1") + self.check_result_auto_pow( 10,f"select c1, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,10) ,pow(c3, 10), pow(c4 ,10), pow(c5 ,10) from {dbname}.t1") + # used for sub table - tdSql.query("select c1 ,pow(c1 ,2) from ct1") + tdSql.query(f"select c1 ,pow(c1 ,2) from {dbname}.ct1") tdSql.checkData(0, 1, 64.000000000) tdSql.checkData(1 , 1, 49.000000000) tdSql.checkData(3 , 1, 25.000000000) @@ -323,7 +246,7 @@ class TDTestCase: # # test bug fix for pow(c1,c2) - tdSql.query("select c1, c5 ,pow(c1,c5) from ct4") + tdSql.query(f"select c1, c5 ,pow(c1,c5) from {dbname}.ct4") tdSql.checkData(0 , 2, None) tdSql.checkData(1 , 2, 104577724.506799981) tdSql.checkData(2 , 2, 3684781.623933245) @@ -331,11 +254,11 @@ class TDTestCase: tdSql.checkData(4 , 2, 7573.273783071) - self.check_result_auto_pow2( "select c1, c2, c3 , c4, c5 from ct1", "select pow(c1,2), pow(c2,2) ,pow(c3,2), pow(c4,2), pow(c5,2) from ct1") - self.check_result_auto_pow__10( "select c1, c2, c3 , c4, c5 from ct1", "select pow(c1,-10), pow(c2,-10) ,pow(c3,-10), pow(c4,-10), pow(c5,-10) from ct1") + self.check_result_auto_pow( 2, f"select c1, c3 , c4, c5 from {dbname}.ct1", f"select pow(c1,2), pow(c3,2), pow(c4,2), pow(c5,2) from {dbname}.ct1") + self.check_result_auto_pow( 10, f"select c1, c3 , c4, c5 from {dbname}.ct1", f"select pow(c1,10), pow(c3,10), pow(c4,10), pow(c5,10) from {dbname}.ct1") # nest query for pow functions - tdSql.query("select c1 , pow(c1,2) ,pow(pow(c1,2),2) , pow(pow(pow(c1,2),2),2) from ct1;") + tdSql.query(f"select c1 , pow(c1,2) ,pow(pow(c1,2),2) , pow(pow(pow(c1,2),2),2) from {dbname}.ct1;") tdSql.checkData(0 , 0 , 8) tdSql.checkData(0 , 1 , 64.000000000) tdSql.checkData(0 , 2 , 4096.000000000) @@ -351,24 +274,24 @@ class TDTestCase: tdSql.checkData(4 , 2 , 0.000000000) tdSql.checkData(4 , 3 , 0.000000000) - # # used for stable table - - tdSql.query("select pow(c1, 2) from stb1") + # # used for stable table + + tdSql.query(f"select pow(c1, 2) from {dbname}.stb1") tdSql.checkRows(25) - + # used for not exists table - tdSql.error("select pow(c1, 2) from stbbb1") - tdSql.error("select pow(c1, 2) from tbname") - tdSql.error("select pow(c1, 2) from ct5") + tdSql.error(f"select pow(c1, 2) from {dbname}.stbbb1") + tdSql.error(f"select pow(c1, 2) from {dbname}.tbname") + tdSql.error(f"select pow(c1, 2) from {dbname}.ct5") - # mix with common col - tdSql.query("select c1, pow(c1 ,2) from ct1") + # mix with common col + tdSql.query(f"select c1, pow(c1 ,2) from {dbname}.ct1") tdSql.checkData(0 , 0 ,8) tdSql.checkData(0 , 1 ,64.000000000) tdSql.checkData(4 , 0 ,0) tdSql.checkData(4 , 1 ,0.000000000) - tdSql.query("select c1, pow(c1,2) from ct4") + tdSql.query(f"select c1, pow(c1,2) from {dbname}.ct4") tdSql.checkData(0 , 0 , None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(4 , 0 ,5) @@ -377,45 +300,45 @@ class TDTestCase: tdSql.checkData(5 , 1 ,None) # mix with common functions - tdSql.query("select c1, pow(c1 ,2),pow(c1,2), log(pow(c1,2) ,2) from ct4 ") + tdSql.query(f"select c1, pow(c1 ,2),pow(c1,2), log(pow(c1,2) ,2) from {dbname}.ct4 ") tdSql.checkData(0 , 0 ,None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(0 , 2 ,None) tdSql.checkData(0 , 3 ,None) - + tdSql.checkData(3 , 0 , 6) tdSql.checkData(3 , 1 ,36.000000000) tdSql.checkData(3 , 2 ,36.000000000) tdSql.checkData(3 , 3 ,5.169925001) - tdSql.query("select c1, pow(c1,1),c5, floor(c5 ) from stb1 ") + tdSql.query(f"select c1, pow(c1,1),c5, floor(c5 ) from {dbname}.stb1 ") # # mix with agg functions , not support - tdSql.error("select c1, pow(c1 ,2),c5, count(c5) from stb1 ") - tdSql.error("select c1, pow(c1 ,2),c5, count(c5) from ct1 ") - tdSql.error("select pow(c1 ,2), count(c5) from stb1 ") - tdSql.error("select pow(c1 ,2), count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from stb1 ") + tdSql.error(f"select c1, pow(c1 ,2),c5, count(c5) from {dbname}.stb1 ") + tdSql.error(f"select c1, pow(c1 ,2),c5, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select pow(c1 ,2), count(c5) from {dbname}.stb1 ") + tdSql.error(f"select pow(c1 ,2), count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ") # agg functions mix with agg functions - tdSql.query("select max(c5), count(c5) from stb1") - tdSql.query("select max(c5), count(c5) from ct1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1") + - # bug fix for count - tdSql.query("select count(c1) from ct4 ") + tdSql.query(f"select count(c1) from {dbname}.ct4 ") tdSql.checkData(0,0,9) - tdSql.query("select count(*) from ct4 ") + tdSql.query(f"select count(*) from {dbname}.ct4 ") tdSql.checkData(0,0,12) - tdSql.query("select count(c1) from stb1 ") + tdSql.query(f"select count(c1) from {dbname}.stb1 ") tdSql.checkData(0,0,22) - tdSql.query("select count(*) from stb1 ") + tdSql.query(f"select count(*) from {dbname}.stb1 ") tdSql.checkData(0,0,25) - # # bug fix for compute - tdSql.query("select c1, pow(c1 ,2) -0 ,pow(c1-4 ,2)-0 from ct4 ") + # # bug fix for compute + tdSql.query(f"select c1, pow(c1 ,2) -0 ,pow(c1-4 ,2)-0 from {dbname}.ct4 ") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -423,7 +346,7 @@ class TDTestCase: tdSql.checkData(1, 1, 64.000000000) tdSql.checkData(1, 2, 16.000000000) - tdSql.query(" select c1, pow(c1 ,2) -0 ,pow(c1-0.1 ,2)-0.1 from ct4") + tdSql.query(f"select c1, pow(c1 ,2) -0 ,pow(c1-0.1 ,2)-0.1 from {dbname}.ct4") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -431,87 +354,86 @@ class TDTestCase: tdSql.checkData(1, 1, 64.000000000) tdSql.checkData(1, 2, 62.310000000) - tdSql.query("select c1, pow(c1, -10), c2, pow(c2, -10), c3, pow(c3, -10) from ct1") + tdSql.query(f"select c1, pow(c1, -10), c2, pow(c2, -10), c3, pow(c3, -10) from {dbname}.ct1") - def test_big_number(self): + def test_big_number(self, dbname="db"): - tdSql.query("select c1, pow(c1, 100000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, pow(c1, 100000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(0, 1, None) tdSql.checkData(1, 1, None) tdSql.checkData(4, 1, 0.000000000) - tdSql.query("select c1, pow(c1, 10000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, pow(c1, 10000000000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(0, 1, None) tdSql.checkData(1, 1, None) tdSql.checkData(4, 1, 0.000000000) - tdSql.query("select c1, pow(c1, 10000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, pow(c1, 10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, pow(c1, 10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, pow(c1, 10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(0, 1, None) tdSql.checkData(1, 1, None) tdSql.checkData(4, 1, 0.000000000) - tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(0, 1, None) tdSql.checkData(1, 1, None) tdSql.checkData(4, 1, 0.000000000) - tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(0, 1, None) tdSql.checkData(1, 1, None) tdSql.checkData(4, 1, 0.000000000) - tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow - def pow_base_test(self): + def pow_base_test(self, dbname="db"): # base is an regular number ,int or double - tdSql.query("select c1, pow(c1, 2) from ct1") + tdSql.query(f"select c1, pow(c1, 2) from {dbname}.ct1") tdSql.checkData(0, 1,64.000000000) - tdSql.query("select c1, pow(c1, 2.0) from ct1") + tdSql.query(f"select c1, pow(c1, 2.0) from {dbname}.ct1") tdSql.checkData(0, 1, 64.000000000) - tdSql.query("select c1, pow(1, 2.0) from ct1") + tdSql.query(f"select c1, pow(1, 2.0) from {dbname}.ct1") tdSql.checkData(0, 1, 1.000000000) tdSql.checkRows(13) # # bug for compute in functions - # tdSql.query("select c1, abs(1/0) from ct1") + # tdSql.query(f"select c1, abs(1/0) from {dbname}.ct1") # tdSql.checkData(0, 0, 8) # tdSql.checkData(0, 1, 1) - tdSql.query("select c1, pow(1, 2.0) from ct1") + tdSql.query(f"select c1, pow(1, 2.0) from {dbname}.ct1") tdSql.checkData(0, 1, 1.000000000) tdSql.checkRows(13) # two cols start pow(x,y) - tdSql.query("select c1,c2, pow(c1,c2) from ct1") + tdSql.query(f"select c1,c2, pow(c1,c2) from {dbname}.ct1") tdSql.checkData(0, 2, None) tdSql.checkData(1, 2, None) tdSql.checkData(4, 2, 1.000000000) - tdSql.query("select c1,c2, pow(c2,c1) from ct1") + tdSql.query(f"select c1,c2, pow(c2,c1) from {dbname}.ct1") tdSql.checkData(0, 2, 3897131646727578700481513520437089271808.000000000) tdSql.checkData(1, 2, 17217033054561120738612297152331776.000000000) tdSql.checkData(4, 2, 1.000000000) - tdSql.query("select c1, pow(2.0 , c1) from ct1") + tdSql.query(f"select c1, pow(2.0 , c1) from {dbname}.ct1") tdSql.checkData(0, 1, 256.000000000) tdSql.checkData(1, 1, 128.000000000) tdSql.checkData(4, 1, 1.000000000) - tdSql.query("select c1, pow(2.0 , c1) from ct1") + tdSql.query(f"select c1, pow(2.0 , c1) from {dbname}.ct1") tdSql.checkData(0, 1, 256.000000000) tdSql.checkData(1, 1, 128.000000000) tdSql.checkData(4, 1, 1.000000000) - def abs_func_filter(self): - tdSql.execute("use db") - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1>5 ") + def abs_func_filter(self, dbname="db"): + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from {dbname}.ct4 where c1>5 ") tdSql.checkRows(3) tdSql.checkData(0,0,8) tdSql.checkData(0,1,8.000000000) @@ -519,7 +441,7 @@ class TDTestCase: tdSql.checkData(0,3,7.900000000) tdSql.checkData(0,4,64.000000000) - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -527,7 +449,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,25.000000000) - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -535,7 +457,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,25.000000000) - tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1 0 order by tbname " , "select pow(c5,2) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_pow2( " select c5 from stb1 where c1 > 0 order by tbname " , "select pow(c5,2) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_pow2( " select t1,c5 from stb1 order by ts " , "select pow(t1,2), pow(c5,2) from stb1 order by ts" ) - self.check_result_auto_pow2( " select t1,c5 from stb1 order by tbname " , "select pow(t1,2) ,pow(c5,2) from stb1 order by tbname" ) - self.check_result_auto_pow2( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select pow(t1,2) ,pow(c5,2) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_pow2( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select pow(t1,2) , pow(c5,2) from stb1 where c1 > 0 order by tbname" ) - pass - - + def support_super_table_test(self, dbname="db"): + self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 order by ts " , f"select pow(c5,2) from {dbname}.stb1 order by ts" ) + self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 order by ts " , f"select pow(t1,2), pow(c5,2) from {dbname}.stb1 order by ts" ) + self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select pow(t1,2) ,pow(c5,2) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(t1,2) ,pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(t1,2) , pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" ) def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() tdLog.printNoPrefix("==========step1:create table ==============") - + self.prepare_datas() - tdLog.printNoPrefix("==========step2:test errors ==============") + tdLog.printNoPrefix("==========step2:test errors ==============") self.test_errors() - - tdLog.printNoPrefix("==========step3:support types ============") + + tdLog.printNoPrefix("==========step3:support types ============") self.support_types() - tdLog.printNoPrefix("==========step4: pow basic query ============") + tdLog.printNoPrefix("==========step4: pow basic query ============") self.basic_pow_function() - tdLog.printNoPrefix("==========step5: big number pow query ============") + tdLog.printNoPrefix("==========step5: big number pow query ============") self.test_big_number() - tdLog.printNoPrefix("==========step6: base number for pow query ============") + tdLog.printNoPrefix("==========step6: base number for pow query ============") self.pow_base_test() - tdLog.printNoPrefix("==========step7: pow boundary query ============") + tdLog.printNoPrefix("==========step7: pow boundary query ============") self.check_boundary_values() - tdLog.printNoPrefix("==========step8: pow filter query ============") + tdLog.printNoPrefix("==========step8: pow filter query ============") self.abs_func_filter() tdLog.printNoPrefix("==========step9: check pow result of stable query ============") - self.support_super_table_test() + self.support_super_table_test() def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/qnodeCluster.py b/tests/system-test/2-query/qnodeCluster.py index f68eb58a7a0820333b50258cf7cd29d860153cac..9e49bff9389deeb83839477c98e194c014a2a87f 100644 --- a/tests/system-test/2-query/qnodeCluster.py +++ b/tests/system-test/2-query/qnodeCluster.py @@ -13,9 +13,9 @@ from util.common import * sys.path.append("./6-cluster/") from clusterCommonCreate import * -from clusterCommonCheck import clusterComCheck +from clusterCommonCheck import clusterComCheck -import threading +import threading class TDTestCase: @@ -28,7 +28,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), True) + tdSql.init(conn.cursor(), False) def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1): tsql.execute("use %s" %dbName) @@ -47,7 +47,7 @@ class TDTestCase: sql = pre_create if sql != pre_create: tsql.execute(sql) - + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) return @@ -55,7 +55,7 @@ class TDTestCase: dbname="db_tsbs" stabname1="readings" stabname2="diagnostics" - ctbnamePre1="rct" + ctbnamePre1="rct" ctbnamePre2="dct" ctbNums=40 self.ctbNums=ctbNums @@ -73,7 +73,7 @@ class TDTestCase: self.create_ctable(tsql=tdSql,dbName=dbname,stbName=stabname2,ctbPrefix=ctbnamePre2,ctbNum=ctbNums) - for j in range(ctbNums): + for j in range(ctbNums): for i in range(rowNUms): tdSql.execute( f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )" @@ -109,19 +109,19 @@ class TDTestCase: def tsbsIotQuery(self,tdSql): tdSql.execute("use db_tsbs") - + # test interval and partition tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ") # print(tdSql.queryResult) parRows=tdSql.queryRows tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ") tdSql.checkRows(parRows) - - - # # test insert into + + + # # test insert into # tdSql.execute("create table testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;") # tdSql.query("insert into testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);") - + # tdSql.query("insert into testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);") @@ -141,7 +141,7 @@ class TDTestCase: tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;") - # 2 stationary-trucks + # 2 stationary-trucks tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)") tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name") @@ -156,7 +156,7 @@ class TDTestCase: tdSql.query("select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;") - # # 6. avg-daily-driving-session + # # 6. avg-daily-driving-session # #taosc core dumped # tdSql.execute("create table random_measure2_1 (ts timestamp,ela float, name binary(40))") # tdSql.query("SELECT ts,diff(mv) AS difka FROM (SELECT ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name,ts interval(10m) fill(value,0)) GROUP BY name,ts;") @@ -166,7 +166,7 @@ class TDTestCase: # 7. avg-load tdSql.query("SELECT fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml FROM diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;") - # 8. daily-activity + # 8. daily-activity tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") @@ -184,7 +184,7 @@ class TDTestCase: tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;") - + #it's already supported: # last-loc tdSql.query("SELECT last_row(ts),latitude,longitude,name,driver FROM readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;") @@ -192,7 +192,7 @@ class TDTestCase: #2. low-fuel tdSql.query("SELECT last_row(ts),name,driver,fuel_state,driver FROM diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;") - + # 3. avg-vs-projected-fuel-consumption tdSql.query("select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from readings where velocity > 1 group by fleet") @@ -213,16 +213,16 @@ class TDTestCase: 'ctbPrefix': 'ctb', 'ctbNum': 1, } - + dnodeNumbers=int(dnodeNumbers) mnodeNums=int(mnodeNums) vnodeNumbers = int(dnodeNumbers-mnodeNums) - + tdSql.query("select * from information_schema.ins_dnodes;") tdLog.debug(tdSql.queryResult) clusterComCheck.checkDnodes(dnodeNumbers) - tdLog.info("create database and stable") + tdLog.info("create database and stable") tdDnodes=cluster.dnodes stopcount =0 threads=[] @@ -234,7 +234,7 @@ class TDTestCase: for tr in threads: tr.start() - tdLog.info("Take turns stopping %s "%stopRole) + tdLog.info("Take turns stopping %s "%stopRole) while stopcount < restartNumbers: tdLog.info(" restart loop: %d"%stopcount ) if stopRole == "mnode": @@ -242,7 +242,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) elif stopRole == "vnode": for i in range(vnodeNumbers): tdDnodes[i+mnodeNums].stoptaosd() @@ -254,7 +254,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) # dnodeNumbers don't include database of schema if clusterComCheck.checkDnodes(dnodeNumbers): @@ -265,12 +265,12 @@ class TDTestCase: tdLog.exit("one or more of dnodes failed to start ") # self.check3mnode() stopcount+=1 - + for tr in threads: tr.join() - def run(self): + def run(self): tdLog.printNoPrefix("==========step1:create database and table,insert data ==============") self.createCluster() self.prepareData() diff --git a/tests/system-test/2-query/query_cols_tags_and_or.py b/tests/system-test/2-query/query_cols_tags_and_or.py index e0fb986d79d8491bf2bd23e82ccde85914c76541..af3fbb83c070202368f317b119377035ac133e16 100644 --- a/tests/system-test/2-query/query_cols_tags_and_or.py +++ b/tests/system-test/2-query/query_cols_tags_and_or.py @@ -19,7 +19,7 @@ class TDTestCase: def init(self, conn, logSql): ## add for TD-6672 tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) + tdSql.init(conn.cursor(), False) def insertData(self, tb_name): insert_sql_list = [f'insert into {tb_name} values ("2021-01-01 12:00:00", 1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1, 2, 3, 4)', @@ -37,17 +37,17 @@ class TDTestCase: for sql in insert_sql_list: tdSql.execute(sql) - def initTb(self): - tdCom.cleanTb() - tb_name = tdCom.getLongName(8, "letters") + def initTb(self, dbname="db"): + tdCom.cleanTb(dbname) + tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}' tdSql.execute( f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned)") self.insertData(tb_name) return tb_name - def initStb(self, count=5): - tdCom.cleanTb() - tb_name = tdCom.getLongName(8, "letters") + def initStb(self, count=5, dbname="db"): + tdCom.cleanTb(dbname) + tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}' tdSql.execute( f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 tinyint unsigned, t11 smallint unsigned, t12 int unsigned, t13 bigint unsigned)") for i in range(1, count+1): @@ -56,9 +56,10 @@ class TDTestCase: self.insertData(f'{tb_name}_sub_{i}') return tb_name - def initTwoStb(self): - tdCom.cleanTb() - tb_name = tdCom.getLongName(8, "letters") + def initTwoStb(self, dbname="db"): + tdCom.cleanTb(dbname) + tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}' + # tb_name = tdCom.getLongName(8, "letters") tb_name1 = f'{tb_name}1' tb_name2 = f'{tb_name}2' tdSql.execute( diff --git a/tests/system-test/2-query/round.py b/tests/system-test/2-query/round.py index 551e225a4d02025780b4238e2079b70249dcdd5a..1d69d3c9afa1d7fffc3b8eac80c2b648a54bc74e 100644 --- a/tests/system-test/2-query/round.py +++ b/tests/system-test/2-query/round.py @@ -8,49 +8,46 @@ from util.sql import * from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - def prepare_datas(self): + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -94,68 +91,68 @@ class TDTestCase: else: tdLog.info("round value check pass , it work as expected ,sql is \"%s\" "%round_query ) - def test_errors(self): + def test_errors(self, dbname="db"): error_sql_lists = [ - "select round from t1", - # "select round(-+--+c1) from t1", - # "select +-round(c1) from t1", - # "select ++-round(c1) from t1", - # "select ++--round(c1) from t1", - # "select - -round(c1)*0 from t1", - # "select round(tbname+1) from t1 ", - "select round(123--123)==1 from t1", - "select round(c1) as 'd1' from t1", - "select round(c1 ,c2 ) from t1", - "select round(c1 ,NULL) from t1", - "select round(,) from t1;", - "select round(round(c1) ab from t1)", - "select round(c1) as int from t1", - "select round from stb1", - # "select round(-+--+c1) from stb1", - # "select +-round(c1) from stb1", - # "select ++-round(c1) from stb1", - # "select ++--round(c1) from stb1", - # "select - -round(c1)*0 from stb1", - # "select round(tbname+1) from stb1 ", - "select round(123--123)==1 from stb1", - "select round(c1) as 'd1' from stb1", - "select round(c1 ,c2 ) from stb1", - "select round(c1 ,NULL) from stb1", - "select round(,) from stb1;", - "select round(round(c1) ab from stb1)", - "select round(c1) as int from stb1" + f"select round from {dbname}.t1", + # f"select round(-+--+c1) from {dbname}.t1", + # f"select +-round(c1) from {dbname}.t1", + # f"select ++-round(c1) from {dbname}.t1", + # f"select ++--round(c1) from {dbname}.t1", + # f"select - -round(c1)*0 from {dbname}.t1", + # f"select round(tbname+1) from {dbname}.t1 ", + f"select round(123--123)==1 from {dbname}.t1", + f"select round(c1) as 'd1' from {dbname}.t1", + f"select round(c1 ,c2 ) from {dbname}.t1", + f"select round(c1 ,NULL) from {dbname}.t1", + f"select round(,) from {dbname}.t1;", + f"select round(round(c1) ab from {dbname}.t1)", + f"select round(c1) as int from {dbname}.t1", + f"select round from {dbname}.stb1", + # f"select round(-+--+c1) from {dbname}.stb1", + # f"select +-round(c1) from {dbname}.stb1", + # f"select ++-round(c1) from {dbname}.stb1", + # f"select ++--round(c1) from {dbname}.stb1", + # f"select - -round(c1)*0 from {dbname}.stb1", + # f"select round(tbname+1) from {dbname}.stb1 ", + f"select round(123--123)==1 from {dbname}.stb1", + f"select round(c1) as 'd1' from {dbname}.stb1", + f"select round(c1 ,c2 ) from {dbname}.stb1", + f"select round(c1 ,NULL) from {dbname}.stb1", + f"select round(,) from {dbname}.stb1;", + f"select round(round(c1) ab from {dbname}.stb1)", + f"select round(c1) as int from {dbname}.stb1" ] for error_sql in error_sql_lists: tdSql.error(error_sql) - def support_types(self): + def support_types(self, dbname="db"): type_error_sql_lists = [ - "select round(ts) from t1" , - "select round(c7) from t1", - "select round(c8) from t1", - "select round(c9) from t1", - "select round(ts) from ct1" , - "select round(c7) from ct1", - "select round(c8) from ct1", - "select round(c9) from ct1", - "select round(ts) from ct3" , - "select round(c7) from ct3", - "select round(c8) from ct3", - "select round(c9) from ct3", - "select round(ts) from ct4" , - "select round(c7) from ct4", - "select round(c8) from ct4", - "select round(c9) from ct4", - "select round(ts) from stb1" , - "select round(c7) from stb1", - "select round(c8) from stb1", - "select round(c9) from stb1" , - - "select round(ts) from stbbb1" , - "select round(c7) from stbbb1", - - "select round(ts) from tbname", - "select round(c9) from tbname" + f"select round(ts) from {dbname}.t1" , + f"select round(c7) from {dbname}.t1", + f"select round(c8) from {dbname}.t1", + f"select round(c9) from {dbname}.t1", + f"select round(ts) from {dbname}.ct1" , + f"select round(c7) from {dbname}.ct1", + f"select round(c8) from {dbname}.ct1", + f"select round(c9) from {dbname}.ct1", + f"select round(ts) from {dbname}.ct3" , + f"select round(c7) from {dbname}.ct3", + f"select round(c8) from {dbname}.ct3", + f"select round(c9) from {dbname}.ct3", + f"select round(ts) from {dbname}.ct4" , + f"select round(c7) from {dbname}.ct4", + f"select round(c8) from {dbname}.ct4", + f"select round(c9) from {dbname}.ct4", + f"select round(ts) from {dbname}.stb1" , + f"select round(c7) from {dbname}.stb1", + f"select round(c8) from {dbname}.stb1", + f"select round(c9) from {dbname}.stb1" , + + f"select round(ts) from {dbname}.stbbb1" , + f"select round(c7) from {dbname}.stbbb1", + + f"select round(ts) from {dbname}.tbname", + f"select round(c9) from {dbname}.tbname" ] @@ -164,127 +161,127 @@ class TDTestCase: type_sql_lists = [ - "select round(c1) from t1", - "select round(c2) from t1", - "select round(c3) from t1", - "select round(c4) from t1", - "select round(c5) from t1", - "select round(c6) from t1", - - "select round(c1) from ct1", - "select round(c2) from ct1", - "select round(c3) from ct1", - "select round(c4) from ct1", - "select round(c5) from ct1", - "select round(c6) from ct1", - - "select round(c1) from ct3", - "select round(c2) from ct3", - "select round(c3) from ct3", - "select round(c4) from ct3", - "select round(c5) from ct3", - "select round(c6) from ct3", - - "select round(c1) from stb1", - "select round(c2) from stb1", - "select round(c3) from stb1", - "select round(c4) from stb1", - "select round(c5) from stb1", - "select round(c6) from stb1", - - "select round(c6) as alisb from stb1", - "select round(c6) alisb from stb1", + f"select round(c1) from {dbname}.t1", + f"select round(c2) from {dbname}.t1", + f"select round(c3) from {dbname}.t1", + f"select round(c4) from {dbname}.t1", + f"select round(c5) from {dbname}.t1", + f"select round(c6) from {dbname}.t1", + + f"select round(c1) from {dbname}.ct1", + f"select round(c2) from {dbname}.ct1", + f"select round(c3) from {dbname}.ct1", + f"select round(c4) from {dbname}.ct1", + f"select round(c5) from {dbname}.ct1", + f"select round(c6) from {dbname}.ct1", + + f"select round(c1) from {dbname}.ct3", + f"select round(c2) from {dbname}.ct3", + f"select round(c3) from {dbname}.ct3", + f"select round(c4) from {dbname}.ct3", + f"select round(c5) from {dbname}.ct3", + f"select round(c6) from {dbname}.ct3", + + f"select round(c1) from {dbname}.stb1", + f"select round(c2) from {dbname}.stb1", + f"select round(c3) from {dbname}.stb1", + f"select round(c4) from {dbname}.stb1", + f"select round(c5) from {dbname}.stb1", + f"select round(c6) from {dbname}.stb1", + + f"select round(c6) as alisb from {dbname}.stb1", + f"select round(c6) alisb from {dbname}.stb1", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - def basic_round_function(self): + def basic_round_function(self, dbname="db"): # basic query - tdSql.query("select c1 from ct3") + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select round(c1) from ct3") + tdSql.query(f"select round(c1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select round(c2) from ct3") + tdSql.query(f"select round(c2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select round(c3) from ct3") + tdSql.query(f"select round(c3) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select round(c4) from ct3") + tdSql.query(f"select round(c4) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select round(c5) from ct3") + tdSql.query(f"select round(c5) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select round(c6) from ct3") + tdSql.query(f"select round(c6) from {dbname}.ct3") # used for regular table - tdSql.query("select round(c1) from t1") + tdSql.query(f"select round(c1) from {dbname}.t1") tdSql.checkData(0, 0, None) tdSql.checkData(1 , 0, 1) tdSql.checkData(3 , 0, 3) tdSql.checkData(5 , 0, None) - tdSql.query("select c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 4, 1.11000) tdSql.checkData(3, 3, 33) tdSql.checkData(5, 4, None) - tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 5, 1.11000) tdSql.checkData(3, 4, 33) tdSql.checkData(5, 5, None) - self.check_result_auto( "select c1, c2, c3 , c4, c5 from t1", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from t1") + self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select (c1), round(c2) ,round(c3), round(c4), round(c5) from {dbname}.t1") # used for sub table - tdSql.query("select round(c1) from ct1") + tdSql.query(f"select round(c1) from {dbname}.ct1") tdSql.checkData(0, 0, 8) tdSql.checkData(1 , 0, 7) tdSql.checkData(3 , 0, 5) tdSql.checkData(5 , 0, 4) - tdSql.query("select round(c1) from ct1") - self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct1", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from ct1") - self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from ct1;","select c1 from ct1" ) + tdSql.query(f"select round(c1) from {dbname}.ct1") + self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select (c1), round(c2) ,round(c3), round(c4), round(c5) from {dbname}.ct1") + self.check_result_auto(f"select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from {dbname}.ct1;",f"select c1 from {dbname}.ct1" ) # used for stable table - tdSql.query("select round(c1) from stb1") + tdSql.query(f"select round(c1) from {dbname}.stb1") tdSql.checkRows(25) - self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct4 ", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from ct4") - self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from ct4;" , "select c1 from ct4" ) + self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.ct4 ", f"select (c1), round(c2) ,round(c3), round(c4), round(c5) from {dbname}.ct4") + self.check_result_auto(f"select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from {dbname}.ct4;" , f"select c1 from {dbname}.ct4" ) # used for not exists table - tdSql.error("select round(c1) from stbbb1") - tdSql.error("select round(c1) from tbname") - tdSql.error("select round(c1) from ct5") + tdSql.error(f"select round(c1) from {dbname}.stbbb1") + tdSql.error(f"select round(c1) from {dbname}.tbname") + tdSql.error(f"select round(c1) from {dbname}.ct5") # mix with common col - tdSql.query("select c1, round(c1) from ct1") + tdSql.query(f"select c1, round(c1) from {dbname}.ct1") tdSql.checkData(0 , 0 ,8) tdSql.checkData(0 , 1 ,8) tdSql.checkData(4 , 0 ,0) tdSql.checkData(4 , 1 ,0) - tdSql.query("select c1, round(c1) from ct4") + tdSql.query(f"select c1, round(c1) from {dbname}.ct4") tdSql.checkData(0 , 0 , None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(4 , 0 ,5) tdSql.checkData(4 , 1 ,5) tdSql.checkData(5 , 0 ,None) tdSql.checkData(5 , 1 ,None) - tdSql.query("select c1, round(c1) from ct4 ") + tdSql.query(f"select c1, round(c1) from {dbname}.ct4 ") tdSql.checkData(0 , 0 ,None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(4 , 0 ,5) tdSql.checkData(4 , 1 ,5) # mix with common functions - tdSql.query("select c1, round(c1),c5, round(c5) from ct4 ") + tdSql.query(f"select c1, round(c1),c5, round(c5) from {dbname}.ct4 ") tdSql.checkData(0 , 0 ,None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(0 , 2 ,None) @@ -300,34 +297,34 @@ class TDTestCase: tdSql.checkData(6 , 2 ,4.44000) tdSql.checkData(6 , 3 ,4.00000) - tdSql.query("select c1, round(c1),c5, round(c5) from stb1 ") + tdSql.query(f"select c1, round(c1),c5, round(c5) from {dbname}.stb1 ") # mix with agg functions , not support - tdSql.error("select c1, round(c1),c5, count(c5) from stb1 ") - tdSql.error("select c1, round(c1),c5, count(c5) from ct1 ") - tdSql.error("select round(c1), count(c5) from stb1 ") - tdSql.error("select round(c1), count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from stb1 ") + tdSql.error(f"select c1, round(c1),c5, count(c5) from {dbname}.stb1 ") + tdSql.error(f"select c1, round(c1),c5, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select round(c1), count(c5) from {dbname}.stb1 ") + tdSql.error(f"select round(c1), count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ") # agg functions mix with agg functions - tdSql.query("select max(c5), count(c5) from stb1") - tdSql.query("select max(c5), count(c5) from ct1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1") # bug fix for count - tdSql.query("select count(c1) from ct4 ") + tdSql.query(f"select count(c1) from {dbname}.ct4 ") tdSql.checkData(0,0,9) - tdSql.query("select count(*) from ct4 ") + tdSql.query(f"select count(*) from {dbname}.ct4 ") tdSql.checkData(0,0,12) - tdSql.query("select count(c1) from stb1 ") + tdSql.query(f"select count(c1) from {dbname}.stb1 ") tdSql.checkData(0,0,22) - tdSql.query("select count(*) from stb1 ") + tdSql.query(f"select count(*) from {dbname}.stb1 ") tdSql.checkData(0,0,25) # bug fix for compute - tdSql.query("select c1, abs(c1) -0 ,round(c1)-0 from ct4 ") + tdSql.query(f"select c1, abs(c1) -0 ,round(c1)-0 from {dbname}.ct4 ") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -335,7 +332,7 @@ class TDTestCase: tdSql.checkData(1, 1, 8.000000000) tdSql.checkData(1, 2, 8.000000000) - tdSql.query(" select c1, abs(c1) -0 ,round(c1-0.1)-0.1 from ct4") + tdSql.query(f"select c1, abs(c1) -0 ,round(c1-0.1)-0.1 from {dbname}.ct4") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -343,9 +340,8 @@ class TDTestCase: tdSql.checkData(1, 1, 8.000000000) tdSql.checkData(1, 2, 7.900000000) - def abs_func_filter(self): - tdSql.execute("use db") - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1>5 ") + def abs_func_filter(self, dbname="db"): + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>5 ") tdSql.checkRows(3) tdSql.checkData(0,0,8) tdSql.checkData(0,1,8.000000000) @@ -353,7 +349,7 @@ class TDTestCase: tdSql.checkData(0,3,7.900000000) tdSql.checkData(0,4,3.000000000) - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -361,7 +357,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,2.000000000) - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -369,7 +365,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,2.000000000) - tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) , round(abs(c1))-0.5 from ct4 where c1>log(c1,2) limit 1 ") + tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) , round(abs(c1))-0.5 from {dbname}.ct4 where c1>log(c1,2) limit 1 ") tdSql.checkRows(1) tdSql.checkData(0,0,8) tdSql.checkData(0,1,88888) @@ -382,44 +378,42 @@ class TDTestCase: def round_Arithmetic(self): pass - def check_boundary_values(self): + def check_boundary_values(self, dbname="bound_test"): - tdSql.execute("drop database if exists bound_test") - tdSql.execute("create database if not exists bound_test") - time.sleep(3) - tdSql.execute("use bound_test") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname}") tdSql.execute( - "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" ) - tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( - f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.error( - f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - self.check_result_auto( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select round(c1), round(c2) ,round(c3), round(c4), round(c5) ,round(c6) from sub1_bound") - self.check_result_auto( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select round(c1), round(c2) ,round(c3), round(c3), round(c2) ,round(c1) from sub1_bound") - self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from sub1_bound;" , "select round(c1) from sub1_bound" ) + self.check_result_auto( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select round(c1), round(c2) ,round(c3), round(c4), round(c5) ,round(c6) from {dbname}.sub1_bound") + self.check_result_auto( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select round(c1), round(c2) ,round(c3), round(c3), round(c2) ,round(c1) from {dbname}.sub1_bound") + self.check_result_auto(f"select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from {dbname}.sub1_bound;" , f"select round(c1) from {dbname}.sub1_bound" ) # check basic elem for table per row - tdSql.query("select round(c1+0.2) ,round(c2) , round(c3+0.3) , round(c4-0.3), round(c5/2), round(c6/2) from sub1_bound ") + tdSql.query(f"select round(c1+0.2) ,round(c2) , round(c3+0.3) , round(c4-0.3), round(c5/2), round(c6/2) from {dbname}.sub1_bound ") tdSql.checkData(0, 0, 2147483647.000000000) tdSql.checkData(0, 2, 32767.000000000) tdSql.checkData(0, 3, 127.000000000) @@ -430,19 +424,18 @@ class TDTestCase: tdSql.checkData(4, 3, -123.000000000) tdSql.checkData(4, 4, -169499995645668991474575059260979281920.000000000) - self.check_result_auto("select c1+1 ,c2 , c3*1 , c4/2, c5/2, c6 from sub1_bound" ,"select round(c1+1) ,round(c2) , round(c3*1) , round(c4/2), round(c5)/2, round(c6) from sub1_bound ") + self.check_result_auto(f"select c1+1 ,c2 , c3*1 , c4/2, c5/2, c6 from {dbname}.sub1_bound" ,f"select round(c1+1) ,round(c2) , round(c3*1) , round(c4/2), round(c5)/2, round(c6) from {dbname}.sub1_bound ") - def support_super_table_test(self): - tdSql.execute(" use db ") - self.check_result_auto( " select c5 from stb1 order by ts " , "select round(c5) from stb1 order by ts" ) - self.check_result_auto( " select c5 from stb1 order by tbname " , "select round(c5) from stb1 order by tbname" ) - self.check_result_auto( " select c5 from stb1 where c1 > 0 order by tbname " , "select round(c5) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto( " select c5 from stb1 where c1 > 0 order by tbname " , "select round(c5) from stb1 where c1 > 0 order by tbname" ) + def support_super_table_test(self, dbname="db"): + self.check_result_auto( f"select c5 from {dbname}.stb1 order by ts " , f"select round(c5) from {dbname}.stb1 order by ts" ) + self.check_result_auto( f"select c5 from {dbname}.stb1 order by tbname " , f"select round(c5) from {dbname}.stb1 order by tbname" ) + self.check_result_auto( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) - self.check_result_auto( " select t1,c5 from stb1 order by ts " , "select round(t1), round(c5) from stb1 order by ts" ) - self.check_result_auto( " select t1,c5 from stb1 order by tbname " , "select round(t1) ,round(c5) from stb1 order by tbname" ) - self.check_result_auto( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select round(t1) ,round(c5) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select round(t1) , round(c5) from stb1 where c1 > 0 order by tbname" ) + self.check_result_auto( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select round(t1), round(c5) from {dbname}.stb1 order by ts" ) + self.check_result_auto( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select round(t1) ,round(c5) from {dbname}.stb1 order by tbname" ) + self.check_result_auto( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(t1) ,round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(t1) , round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) pass diff --git a/tests/system-test/2-query/rtrim.py b/tests/system-test/2-query/rtrim.py index 30624792cc33866a19c0ec1a31594cdfa438ffcf..80307e8534787889b080baa0c25a32b638c49461 100644 --- a/tests/system-test/2-query/rtrim.py +++ b/tests/system-test/2-query/rtrim.py @@ -120,16 +120,16 @@ class TDTestCase: return sqls - def __test_current(self): # sourcery skip: use-itertools-product + def __test_current(self, dbname="db"): # sourcery skip: use-itertools-product tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: self.__rtrim_check(tb) tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") - def __test_error(self): + def __test_error(self, dbname="db"): tdLog.printNoPrefix("==========err sql condition check , must return error==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: for errsql in self.__rtrim_err_check(tb): @@ -142,17 +142,15 @@ class TDTestCase: self.__test_error() - def __create_tb(self): - tdSql.prepare() - + def __create_tb(self, dbname="db"): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp ) tags (t1 int) ''' - create_ntb_sql = f'''create table t1( + create_ntb_sql = f'''create table {dbname}.t1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -162,29 +160,29 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') - def __insert_data(self, rows): + def __insert_data(self, rows, dbname="db"): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into ct1 values + f'''insert into {dbname}.ct1 values ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -200,7 +198,7 @@ class TDTestCase: ) tdSql.execute( - f'''insert into ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -216,13 +214,13 @@ class TDTestCase: ) for i in range(rows): - insert_data = f'''insert into t1 values + insert_data = f'''insert into {dbname}.t1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -251,8 +249,7 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) + tdSql.execute("flush database db") tdSql.execute("use db") diff --git a/tests/system-test/2-query/sample.py b/tests/system-test/2-query/sample.py index 46d2062341e67be45b5cedd72564c8be8ef04a71..7f1d7ab8c0d62fb4db7386caf3b4eeca4b3f8cba 100644 --- a/tests/system-test/2-query/sample.py +++ b/tests/system-test/2-query/sample.py @@ -11,21 +11,17 @@ # -*- coding: utf-8 -*- -from pstats import Stats import sys -import subprocess import random -import math -import numpy as np -import inspect import re -import taos from util.log import * from util.cases import * from util.sql import * from util.dnodes import * +DBNAME = "db" + class TDTestCase: def init(self, conn, logSql): @@ -33,11 +29,11 @@ class TDTestCase: tdSql.init(conn.cursor()) self.ts = 1537146000000 - def sample_query_form(self, sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + def sample_query_form(self, sel=f"select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): ''' sample function: - :param sel: string, must be "select", required parameters; + :param sel: string, must be f"select", required parameters; :param func: string, in this case must be "sample(", otherwise return other function, required parameters; :param col: string, column name, required parameters; :param m_comm: string, comma between col and k , required parameters; @@ -47,12 +43,12 @@ class TDTestCase: :param fr: string, must be "from", required parameters; :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; :param condition: expression; - :return: sample query statement,default: select sample(c1, 1) from t1 + :return: sample query statement,default: select sample(c1, 1) from {dbname}.t1 ''' return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}" - def checksample(self,sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + def checksample(self,sel=f"select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr=f"{DBNAME}.t1", condition=""): # print(self.sample_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, # table_expr=table_expr, condition=condition)) line = sys._getframe().f_back.f_lineno @@ -65,7 +61,7 @@ class TDTestCase: )) - sql = "select * from t1" + sql = f"select * from {table_expr}" collist = tdSql.getColNameList(sql) if not isinstance(col, str): @@ -125,7 +121,7 @@ class TDTestCase: # table_expr=table_expr, condition=condition # )) - if any( [func != "sample(" , r_comm != ")" , fr != "from", sel != "select"]): + if any( [func != "sample(" , r_comm != ")" , fr != "from", sel != f"select"]): print(f"case in {line}: ", end='') return tdSql.error(self.sample_query_form( sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, @@ -286,14 +282,14 @@ class TDTestCase: return else: - if "where" in condition: - condition = re.sub('where', f"where {col} is not null and ", condition) - else: - condition = f"where {col} is not null" + condition - print(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") - tdSql.query(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + # if "where" in condition: + # condition = re.sub('where', f"where {col} is not null and ", condition) + # else: + # condition = f"where {col} is not null" + condition + # print(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + # tdSql.query(f"select _c0, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") # offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0 - pre_sample = tdSql.queryResult + # pre_sample = tdSql.queryResult # pre_len = tdSql.queryRows # for i in range(sample_len): # if sample_result[pre_row:pre_row + step][i] not in pre_sample: @@ -301,7 +297,7 @@ class TDTestCase: # else: # tdLog.info(f"case in {line} is success: sample data is in {group_name}") - pass + pass def sample_current_query(self) : @@ -322,24 +318,24 @@ class TDTestCase: self.checksample(**case6) # # case7~8: nested query - # case7 = {"table_expr": "(select c1 from stb1)"} - # self.checksample(**case7) - # case8 = {"table_expr": "(select sample(c1, 1) c1 from stb1 group by tbname)"} - # self.checksample(**case8) + case7 = {"table_expr": f"(select c1 from {DBNAME}.stb1)"} + self.checksample(**case7) + case8 = {"table_expr": f"(select sample(c1, 1) c1 from {DBNAME}.stb1 group by tbname)"} + self.checksample(**case8) # case9~10: mix with tbname/ts/tag/col - # case9 = {"alias": ", tbname"} - # self.checksample(**case9) - # case10 = {"alias": ", _c0"} - # self.checksample(**case10) - # case11 = {"alias": ", st1"} + case9 = {"alias": ", tbname"} + self.checksample(**case9) + case10 = {"alias": ", _c0"} + self.checksample(**case10) + case11 = {"alias": ", st1"} # self.checksample(**case11) - tdSql.query("select sample( c1 , 1 ) , st1 from t1") + tdSql.query(f"select sample( c1 , 1 ) , st1 from {DBNAME}.t1") - # case12 = {"alias": ", c1"} + case12 = {"alias": ", c1"} # self.checksample(**case12) - tdSql.query("select sample( c1 , 1 ) , c1 from t1") + tdSql.query(f"select sample( c1 , 1 ) , c1 from {DBNAME}.t1") # case13~15: with single condition case13 = {"condition": "where c1 <= 10"} @@ -353,32 +349,31 @@ class TDTestCase: case16 = {"condition": "where c6=1 or c6 =0"} self.checksample(**case16) - # # case17: only support normal table join - # case17 = { - # "col": "t1.c1", - # "table_expr": "t1, t2", - # "condition": "where t1.ts=t2.ts" - # } - # self.checksample(**case17) - # # case18~19: with group by - # case19 = { - # "table_expr": "stb1", - # "condition": "partition by tbname" - # } + # case17: only support normal table join + case17 = { + "col": "t1.c1", + "table_expr": f"{DBNAME}.t1 t1 join {DBNAME}.t2 t2 on t1.ts = t2.ts", + } + self.checksample(**case17) + # case18~19: with group by + case19 = { + "table_expr": f"{DBNAME}.stb1", + "condition": "partition by tbname" + } # self.checksample(**case19) - # # case20~21: with order by - # case20 = {"condition": "order by ts"} + # case20~21: with order by + case20 = {"condition": "order by ts"} # self.checksample(**case20) - # case21 = { - # "table_expr": "stb1", - # "condition": "partition by tbname order by tbname" - # } + case21 = { + "table_expr": f"{DBNAME}.stb1", + "condition": "partition by tbname order by tbname" + } # self.checksample(**case21) # case22: with union case22 = { - "condition": "union all select sample( c1 , 1 ) from t2" + "condition": f"union all select sample( c1 , 1 ) from {DBNAME}.t2" } self.checksample(**case22) @@ -396,12 +391,12 @@ class TDTestCase: case26 = {"k": 1000} self.checksample(**case26) case27 = { - "table_expr": "stb1", + "table_expr": f"{DBNAME}.stb1", "condition": "group by tbname slimit 1 " } self.checksample(**case27) # with slimit case28 = { - "table_expr": "stb1", + "table_expr": f"{DBNAME}.stb1", "condition": "group by tbname slimit 1 soffset 1" } self.checksample(**case28) # with soffset @@ -431,7 +426,7 @@ class TDTestCase: # err9 = {"col": "st1"} # self.checksample(**err9) # col: tag - tdSql.query(" select sample(st1 ,1) from t1 ") + tdSql.query(f"select sample(st1 ,1) from {DBNAME}.t1 ") # err10 = {"col": 1} # self.checksample(**err10) # col: value # err11 = {"col": "NULL"} @@ -494,13 +489,13 @@ class TDTestCase: self.checksample(**err39) # mix with calculation function 2 # err40 = {"alias": "+ 2"} # self.checksample(**err40) # mix with arithmetic 1 - # tdSql.query(" select sample(c1 , 1) + 2 from t1 ") + # tdSql.query(f"select sample(c1 , 1) + 2 from {dbname}.t1 ") err41 = {"alias": "+ avg(c1)"} # self.checksample(**err41) # mix with arithmetic 2 # err42 = {"alias": ", c1"} # self.checksample(**err42) - tdSql.query("select sample( c1 , 1 ) , c1 from t1") + tdSql.query(f"select sample( c1 , 1 ) , c1 from {DBNAME}.t1") # mix with other col # err43 = {"table_expr": "stb1"} # self.checksample(**err43) # select stb directly @@ -510,14 +505,14 @@ class TDTestCase: # "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" # } # self.checksample(**err44) # stb join - tdSql.query("select sample( stb1.c1 , 1 ) from stb1, stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts") + tdSql.query(f"select sample( stb1.c1 , 1 ) from {DBNAME}.stb1 stb1, {DBNAME}.stb2 stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts") # err45 = { # "condition": "where ts>0 and ts < now interval(1h) fill(next)" # } # self.checksample(**err45) # interval - tdSql.error("select sample( c1 , 1 ) from t1 where ts>0 and ts < now interval(1h) fill(next)") + tdSql.error(f"select sample( c1 , 1 ) from {DBNAME}.t1 where ts>0 and ts < now interval(1h) fill(next)") err46 = { - "table_expr": "t1", + "table_expr": f"{DBNAME}.t1", "condition": "group by c6" } # self.checksample(**err46) # group by normal col @@ -563,49 +558,45 @@ class TDTestCase: pass - def sample_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + def sample_test_data(self, tbnum:int, data_row:int, basetime:int, dbname="db") -> None : for i in range(tbnum): for j in range(data_row): tdSql.execute( - f"insert into t{i} values (" + f"insert into {dbname}.t{i} values (" f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" ) tdSql.execute( - f"insert into t{i} values (" + f"insert into {dbname}.t{i} values (" f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" ) tdSql.execute( - f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + f"insert into {dbname}.tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" ) pass - def sample_test_table(self,tbnum: int) -> None : - tdSql.execute("drop database if exists db") - tdSql.execute("create database if not exists db keep 3650") - tdSql.execute("use db") + def sample_test_table(self,tbnum: int, dbname="db") -> None : + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname} keep 3650") tdSql.execute( - "create stable db.stb1 (\ + f"create stable {dbname}.stb1 (\ ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ ) \ tags(st1 int)" ) tdSql.execute( - "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + f"create stable {dbname}.stb2 (ts timestamp, c1 int) tags(st2 int)" ) for i in range(tbnum): - tdSql.execute(f"create table t{i} using stb1 tags({i})") - tdSql.execute(f"create table tt{i} using stb2 tags({i})") - - pass - + tdSql.execute(f"create table {dbname}.t{i} using {dbname}.stb1 tags({i})") + tdSql.execute(f"create table {dbname}.tt{i} using {dbname}.stb2 tags({i})") def check_sample(self , sample_query , origin_query ): @@ -626,45 +617,43 @@ class TDTestCase: else: tdLog.exit(" sample data is not in datas groups ,failed sql is : %s" % sample_query ) - - def basic_sample_query(self): - tdSql.execute(" drop database if exists db ") - tdSql.execute(" create database if not exists db duration 300d ") - tdSql.execute(" use db ") + def basic_sample_query(self, dbname="db"): + tdSql.execute(f" drop database if exists {dbname} ") + tdSql.execute(f" create database if not exists {dbname} duration 300d ") tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -683,116 +672,116 @@ class TDTestCase: # basic query for sample # params test for all - tdSql.error(" select sample(c1,c1) from t1 ") - tdSql.error(" select sample(c1,now) from t1 ") - tdSql.error(" select sample(c1,tbname) from t1 ") - tdSql.error(" select sample(c1,ts) from t1 ") - tdSql.error(" select sample(c1,false) from t1 ") - tdSql.query(" select sample(123,1) from t1 ") - - tdSql.query(" select sample(c1,2) from t1 ") + tdSql.error(f"select sample(c1,c1) from {dbname}.t1 ") + tdSql.error(f"select sample(c1,now) from {dbname}.t1 ") + tdSql.error(f"select sample(c1,tbname) from {dbname}.t1 ") + tdSql.error(f"select sample(c1,ts) from {dbname}.t1 ") + tdSql.error(f"select sample(c1,false) from {dbname}.t1 ") + tdSql.query(f"select sample(123,1) from {dbname}.t1 ") + + tdSql.query(f"select sample(c1,2) from {dbname}.t1 ") tdSql.checkRows(2) - tdSql.query(" select sample(c1,10) from t1 ") + tdSql.query(f"select sample(c1,10) from {dbname}.t1 ") tdSql.checkRows(9) - tdSql.query(" select sample(c8,10) from t1 ") + tdSql.query(f"select sample(c8,10) from {dbname}.t1 ") tdSql.checkRows(9) - tdSql.query(" select sample(c1,999) from t1 ") + tdSql.query(f"select sample(c1,999) from {dbname}.t1 ") tdSql.checkRows(9) - tdSql.query(" select sample(c1,1000) from t1 ") + tdSql.query(f"select sample(c1,1000) from {dbname}.t1 ") tdSql.checkRows(9) - tdSql.query(" select sample(c8,1000) from t1 ") + tdSql.query(f"select sample(c8,1000) from {dbname}.t1 ") tdSql.checkRows(9) - tdSql.error(" select sample(c1,-1) from t1 ") + tdSql.error(f"select sample(c1,-1) from {dbname}.t1 ") # bug need fix - # tdSql.query("select sample(c1 ,2) , 123 from stb1;") + # tdSql.query(f"select sample(c1 ,2) , 123 from {dbname}.stb1;") # all type support - tdSql.query(" select sample(c1 , 20 ) from ct4 ") + tdSql.query(f"select sample(c1 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c2 , 20 ) from ct4 ") + tdSql.query(f"select sample(c2 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c3 , 20 ) from ct4 ") + tdSql.query(f"select sample(c3 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c4 , 20 ) from ct4 ") + tdSql.query(f"select sample(c4 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c5 , 20 ) from ct4 ") + tdSql.query(f"select sample(c5 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c6 , 20 ) from ct4 ") + tdSql.query(f"select sample(c6 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c7 , 20 ) from ct4 ") + tdSql.query(f"select sample(c7 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c8 , 20 ) from ct4 ") + tdSql.query(f"select sample(c8 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c9 , 20 ) from ct4 ") + tdSql.query(f"select sample(c9 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - tdSql.query(" select sample(c10 , 20 ) from ct4 ") + tdSql.query(f"select sample(c10 , 20 ) from {dbname}.ct4 ") tdSql.checkRows(9) - # tdSql.query(" select sample(t1 , 20 ) from ct1 ") + # tdSql.query(f"select sample(t1 , 20 ) from {dbname}.ct1 ") # tdSql.checkRows(13) # filter data - tdSql.query(" select sample(c1, 20 ) from t1 where c1 is null ") + tdSql.query(f"select sample(c1, 20 ) from {dbname}.t1 where c1 is null ") tdSql.checkRows(1) - tdSql.query(" select sample(c1, 20 ) from t1 where c1 =6 ") + tdSql.query(f"select sample(c1, 20 ) from {dbname}.t1 where c1 =6 ") tdSql.checkRows(1) - tdSql.query(" select sample(c1, 20 ) from t1 where c1 > 6 ") + tdSql.query(f"select sample(c1, 20 ) from {dbname}.t1 where c1 > 6 ") tdSql.checkRows(3) - self.check_sample("select sample(c1, 20 ) from t1 where c1 > 6" , "select c1 from t1 where c1 > 6") + self.check_sample(f"select sample(c1, 20 ) from {dbname}.t1 where c1 > 6" , f"select c1 from {dbname}.t1 where c1 > 6") - tdSql.query(" select sample( c1 , 1 ) from t1 where c1 in (0, 1,2) ") + tdSql.query(f"select sample( c1 , 1 ) from {dbname}.t1 where c1 in (0, 1,2) ") tdSql.checkRows(1) - tdSql.query("select sample( c1 ,3 ) from t1 where c1 between 1 and 10 ") + tdSql.query(f"select sample( c1 ,3 ) from {dbname}.t1 where c1 between 1 and 10 ") tdSql.checkRows(3) - self.check_sample("select sample( c1 ,3 ) from t1 where c1 between 1 and 10" ,"select c1 from t1 where c1 between 1 and 10") + self.check_sample(f"select sample( c1 ,3 ) from {dbname}.t1 where c1 between 1 and 10" ,f"select c1 from {dbname}.t1 where c1 between 1 and 10") # join - tdSql.query("select sample( ct4.c1 , 1 ) from ct1, ct4 where ct4.ts=ct1.ts") + tdSql.query(f"select sample( ct4.c1 , 1 ) from {dbname}.ct1 ct1, {dbname}.ct4 ct4 where ct4.ts=ct1.ts") # partition by tbname - tdSql.query("select sample(c1,2) from stb1 partition by tbname") + tdSql.query(f"select sample(c1,2) from {dbname}.stb1 partition by tbname") tdSql.checkRows(4) - self.check_sample("select sample(c1,2) from stb1 partition by tbname" , "select c1 from stb1 partition by tbname") + self.check_sample(f"select sample(c1,2) from {dbname}.stb1 partition by tbname" , f"select c1 from {dbname}.stb1 partition by tbname") # nest query - # tdSql.query("select sample(c1,2) from (select c1 from t1); ") + # tdSql.query(f"select sample(c1,2) from (select c1 from {dbname}.t1); ") # tdSql.checkRows(2) # union all - tdSql.query("select sample(c1,2) from t1 union all select sample(c1,3) from t1") + tdSql.query(f"select sample(c1,2) from {dbname}.t1 union all select sample(c1,3) from {dbname}.t1") tdSql.checkRows(5) # fill interval # not support mix with other function - tdSql.error("select top(c1,2) , sample(c1,2) from ct1") - tdSql.error("select max(c1) , sample(c1,2) from ct1") - tdSql.query("select c1 , sample(c1,2) from ct1") + tdSql.error(f"select top(c1,2) , sample(c1,2) from {dbname}.ct1") + tdSql.error(f"select max(c1) , sample(c1,2) from {dbname}.ct1") + tdSql.query(f"select c1 , sample(c1,2) from {dbname}.ct1") # bug for mix with scalar - tdSql.query("select 123 , sample(c1,100) from ct1") - tdSql.query("select sample(c1,100)+2 from ct1") - tdSql.query("select abs(sample(c1,100)) from ct1") + tdSql.query(f"select 123 , sample(c1,100) from {dbname}.ct1") + tdSql.query(f"select sample(c1,100)+2 from {dbname}.ct1") + tdSql.query(f"select abs(sample(c1,100)) from {dbname}.ct1") - def sample_test_run(self) : + def sample_test_run(self, dbname="db") : tdLog.printNoPrefix("==========support sample function==========") tbnum = 10 nowtime = int(round(time.time() * 1000)) @@ -805,28 +794,28 @@ class TDTestCase: tdLog.printNoPrefix("######## insert only NULL test:") for i in range(tbnum): - tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") - tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime + 5})") self.sample_current_query() self.sample_error_query() tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") - # self.sample_test_table(tbnum) - # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " - # f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") - # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " - # f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") - # self.sample_current_query() - # self.sample_error_query() + self.sample_test_table(tbnum) + tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + self.sample_current_query() + self.sample_error_query() tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") - # self.sample_test_table(tbnum) - # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " - # f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") - # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " - # f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") - # self.sample_current_query() - # self.sample_error_query() + self.sample_test_table(tbnum) + tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + self.sample_current_query() + self.sample_error_query() tdLog.printNoPrefix("######## insert data without NULL data test:") self.sample_test_table(tbnum) @@ -837,16 +826,16 @@ class TDTestCase: tdLog.printNoPrefix("######## insert data mix with NULL test:") for i in range(tbnum): - tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") - tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") - tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") self.sample_current_query() self.sample_error_query() tdLog.printNoPrefix("######## check after WAL test:") - tdSql.query("select * from information_schema.ins_dnodes") + tdSql.query(f"select * from information_schema.ins_dnodes") index = tdSql.getData(0, 0) tdDnodes.stop(index) tdDnodes.start(index) @@ -855,25 +844,25 @@ class TDTestCase: self.basic_sample_query() - def sample_big_data(self): - tdSql.execute("create database sample_db") + def sample_big_data(self, dbname="sample_db"): + tdSql.execute(f"create database {dbname}") tdSql.execute("use sample_db") - tdSql.execute("create stable st (ts timestamp ,c1 int ) tags(ind int)" ) - tdSql.execute("create table sub_tb using st tags(1)") + tdSql.execute(f"create stable {dbname}.st (ts timestamp ,c1 int ) tags(ind int)" ) + tdSql.execute(f"create table {dbname}.sub_tb using {dbname}.st tags(1)") for i in range(2000): ts = self.ts+i*10 - tdSql.execute(f"insert into sub_tb values({ts} ,{i})") + tdSql.execute(f"insert into {dbname}.sub_tb values({ts} ,{i})") - tdSql.query("select count(*) from st") + tdSql.query(f"select count(*) from {dbname}.st") tdSql.checkData(0,0,2000) - tdSql.query("select sample(c1 ,1000) from st") + tdSql.query(f"select sample(c1 ,1000) from {dbname}.st") tdSql.checkRows(1000) # bug need fix tdSql.query("select c1 ,t1, sample(c1,2) from db.stb1 partition by c1 ") tdSql.query("select sample(c1,2) from db.stb1 partition by c1 ") - # tdSql.query("select c1 ,ind, sample(c1,2) from sample_db.st partition by c1 ") + tdSql.query("select c1 ,ind, sample(c1,2) from sample_db.st partition by c1 ") def run(self): import traceback diff --git a/tests/system-test/2-query/sin.py b/tests/system-test/2-query/sin.py index 7cb559c510f637c25fef6e7573ea44c92a2051bc..a1ba3354879eb9e1e0abe66ae445f7604734ad66 100644 --- a/tests/system-test/2-query/sin.py +++ b/tests/system-test/2-query/sin.py @@ -9,48 +9,46 @@ from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} + def init(self, conn, powSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - - def prepare_datas(self): + + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) - + tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -65,14 +63,15 @@ class TDTestCase: ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ''' ) - + def check_result_auto_sin(self ,origin_query , pow_query): pow_result = tdSql.getResult(pow_query) + origin_result = tdSql.getResult(origin_query) auto_result =[] - + for row in origin_result: row_check = [] for elem in row: @@ -82,190 +81,179 @@ class TDTestCase: elem = math.sin(elem) row_check.append(elem) auto_result.append(row_check) - - check_status = True - + tdSql.query(pow_query) for row_index , row in enumerate(pow_result): for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): - check_status = False - elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - check_status = False - else: - pass - if not check_status: - tdLog.notice("sin function value has not as expected , sql is \"%s\" "%pow_query ) - sys.exit(1) - else: - tdLog.info("sin value check pass , it work as expected ,sql is \"%s\" "%pow_query ) - - def test_errors(self): + tdSql.checkData(row_index ,col_index ,auto_result[row_index][col_index]) + + + def test_errors(self, dbname="db"): error_sql_lists = [ - "select sin from t1", - # "select sin(-+--+c1 ) from t1", - # "select +-sin(c1) from t1", - # "select ++-sin(c1) from t1", - # "select ++--sin(c1) from t1", - # "select - -sin(c1)*0 from t1", - # "select sin(tbname+1) from t1 ", - "select sin(123--123)==1 from t1", - "select sin(c1) as 'd1' from t1", - "select sin(c1 ,c2) from t1", - "select sin(c1 ,NULL ) from t1", - "select sin(,) from t1;", - "select sin(sin(c1) ab from t1)", - "select sin(c1 ) as int from t1", - "select sin from stb1", - # "select sin(-+--+c1) from stb1", - # "select +-sin(c1) from stb1", - # "select ++-sin(c1) from stb1", - # "select ++--sin(c1) from stb1", - # "select - -sin(c1)*0 from stb1", - # "select sin(tbname+1) from stb1 ", - "select sin(123--123)==1 from stb1", - "select sin(c1) as 'd1' from stb1", - "select sin(c1 ,c2 ) from stb1", - "select sin(c1 ,NULL) from stb1", - "select sin(,) from stb1;", - "select sin(sin(c1) ab from stb1)", - "select sin(c1) as int from stb1" + f"select sin from {dbname}.t1", + # f"select sin(-+--+c1 ) from {dbname}.t1", + # f"select +-sin(c1) from {dbname}.t1", + # f"select ++-sin(c1) from {dbname}.t1", + # f"select ++--sin(c1) from {dbname}.t1", + # f"select - -sin(c1)*0 from {dbname}.t1", + # f"select sin(tbname+1) from {dbname}.t1 ", + f"select sin(123--123)==1 from {dbname}.t1", + f"select sin(c1) as 'd1' from {dbname}.t1", + f"select sin(c1 ,c2) from {dbname}.t1", + f"select sin(c1 ,NULL ) from {dbname}.t1", + f"select sin(,) from {dbname}.t1;", + f"select sin(sin(c1) ab from {dbname}.t1)", + f"select sin(c1 ) as int from {dbname}.t1", + f"select sin from {dbname}.stb1", + # f"select sin(-+--+c1) from {dbname}.stb1", + # f"select +-sin(c1) from {dbname}.stb1", + # f"select ++-sin(c1) from {dbname}.stb1", + # f"select ++--sin(c1) from {dbname}.stb1", + # f"select - -sin(c1)*0 from {dbname}.stb1", + # f"select sin(tbname+1) from {dbname}.stb1 ", + f"select sin(123--123)==1 from {dbname}.stb1", + f"select sin(c1) as 'd1' from {dbname}.stb1", + f"select sin(c1 ,c2 ) from {dbname}.stb1", + f"select sin(c1 ,NULL) from {dbname}.stb1", + f"select sin(,) from {dbname}.stb1;", + f"select sin(sin(c1) ab from {dbname}.stb1)", + f"select sin(c1) as int from {dbname}.stb1" ] for error_sql in error_sql_lists: tdSql.error(error_sql) - - def support_types(self): + + def support_types(self, dbname="db"): type_error_sql_lists = [ - "select sin(ts) from t1" , - "select sin(c7) from t1", - "select sin(c8) from t1", - "select sin(c9) from t1", - "select sin(ts) from ct1" , - "select sin(c7) from ct1", - "select sin(c8) from ct1", - "select sin(c9) from ct1", - "select sin(ts) from ct3" , - "select sin(c7) from ct3", - "select sin(c8) from ct3", - "select sin(c9) from ct3", - "select sin(ts) from ct4" , - "select sin(c7) from ct4", - "select sin(c8) from ct4", - "select sin(c9) from ct4", - "select sin(ts) from stb1" , - "select sin(c7) from stb1", - "select sin(c8) from stb1", - "select sin(c9) from stb1" , - - "select sin(ts) from stbbb1" , - "select sin(c7) from stbbb1", - - "select sin(ts) from tbname", - "select sin(c9) from tbname" + f"select sin(ts) from {dbname}.t1" , + f"select sin(c7) from {dbname}.t1", + f"select sin(c8) from {dbname}.t1", + f"select sin(c9) from {dbname}.t1", + f"select sin(ts) from {dbname}.ct1" , + f"select sin(c7) from {dbname}.ct1", + f"select sin(c8) from {dbname}.ct1", + f"select sin(c9) from {dbname}.ct1", + f"select sin(ts) from {dbname}.ct3" , + f"select sin(c7) from {dbname}.ct3", + f"select sin(c8) from {dbname}.ct3", + f"select sin(c9) from {dbname}.ct3", + f"select sin(ts) from {dbname}.ct4" , + f"select sin(c7) from {dbname}.ct4", + f"select sin(c8) from {dbname}.ct4", + f"select sin(c9) from {dbname}.ct4", + f"select sin(ts) from {dbname}.stb1" , + f"select sin(c7) from {dbname}.stb1", + f"select sin(c8) from {dbname}.stb1", + f"select sin(c9) from {dbname}.stb1" , + + f"select sin(ts) from {dbname}.stbbb1" , + f"select sin(c7) from {dbname}.stbbb1", + + f"select sin(ts) from {dbname}.tbname", + f"select sin(c9) from {dbname}.tbname" ] - + for type_sql in type_error_sql_lists: tdSql.error(type_sql) - - + + type_sql_lists = [ - "select sin(c1) from t1", - "select sin(c2) from t1", - "select sin(c3) from t1", - "select sin(c4) from t1", - "select sin(c5) from t1", - "select sin(c6) from t1", - - "select sin(c1) from ct1", - "select sin(c2) from ct1", - "select sin(c3) from ct1", - "select sin(c4) from ct1", - "select sin(c5) from ct1", - "select sin(c6) from ct1", - - "select sin(c1) from ct3", - "select sin(c2) from ct3", - "select sin(c3) from ct3", - "select sin(c4) from ct3", - "select sin(c5) from ct3", - "select sin(c6) from ct3", - - "select sin(c1) from stb1", - "select sin(c2) from stb1", - "select sin(c3) from stb1", - "select sin(c4) from stb1", - "select sin(c5) from stb1", - "select sin(c6) from stb1", - - "select sin(c6) as alisb from stb1", - "select sin(c6) alisb from stb1", + f"select sin(c1) from {dbname}.t1", + f"select sin(c2) from {dbname}.t1", + f"select sin(c3) from {dbname}.t1", + f"select sin(c4) from {dbname}.t1", + f"select sin(c5) from {dbname}.t1", + f"select sin(c6) from {dbname}.t1", + + f"select sin(c1) from {dbname}.ct1", + f"select sin(c2) from {dbname}.ct1", + f"select sin(c3) from {dbname}.ct1", + f"select sin(c4) from {dbname}.ct1", + f"select sin(c5) from {dbname}.ct1", + f"select sin(c6) from {dbname}.ct1", + + f"select sin(c1) from {dbname}.ct3", + f"select sin(c2) from {dbname}.ct3", + f"select sin(c3) from {dbname}.ct3", + f"select sin(c4) from {dbname}.ct3", + f"select sin(c5) from {dbname}.ct3", + f"select sin(c6) from {dbname}.ct3", + + f"select sin(c1) from {dbname}.stb1", + f"select sin(c2) from {dbname}.stb1", + f"select sin(c3) from {dbname}.stb1", + f"select sin(c4) from {dbname}.stb1", + f"select sin(c5) from {dbname}.stb1", + f"select sin(c6) from {dbname}.stb1", + + f"select sin(c6) as alisb from {dbname}.stb1", + f"select sin(c6) alisb from {dbname}.stb1", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - - def basic_sin_function(self): - # basic query - tdSql.query("select c1 from ct3") + def basic_sin_function(self, dbname="db"): + + # basic query + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select sin(c1) from ct3") + tdSql.query(f"select sin(c1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sin(c2) from ct3") + tdSql.query(f"select sin(c2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sin(c3) from ct3") + tdSql.query(f"select sin(c3) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sin(c4) from ct3") + tdSql.query(f"select sin(c4) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sin(c5) from ct3") + tdSql.query(f"select sin(c5) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sin(c6) from ct3") + tdSql.query(f"select sin(c6) from {dbname}.ct3") tdSql.checkRows(0) # # used for regular table - tdSql.query("select sin(c1) from t1") + tdSql.query(f"select sin(c1) from {dbname}.t1") tdSql.checkData(0, 0, None) tdSql.checkData(1 , 0, 0.841470985) tdSql.checkData(3 , 0, 0.141120008) tdSql.checkData(5 , 0, None) - tdSql.query("select c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 4, 1.11000) tdSql.checkData(3, 3, 33) tdSql.checkData(5, 4, None) - tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 5, 1.11000) tdSql.checkData(3, 4, 33) tdSql.checkData(5, 5, None) - self.check_result_auto_sin( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from t1") - + self.check_result_auto_sin( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from {dbname}.t1") + # used for sub table - tdSql.query("select c2 ,sin(c2) from ct1") + tdSql.query(f"select c2 ,sin(c2) from {dbname}.ct1") tdSql.checkData(0, 1, -0.220708349) tdSql.checkData(1 , 1, -0.556921845) tdSql.checkData(3 , 1, -0.798311364) tdSql.checkData(4 , 1, 0.000000000) - tdSql.query("select c1, c5 ,sin(c5) from ct4") + tdSql.query(f"select c1, c5 ,sin(c5) from {dbname}.ct4") tdSql.checkData(0 , 2, None) tdSql.checkData(1 , 2, 0.518228108) tdSql.checkData(2 , 2, 0.996475613) tdSql.checkData(3 , 2, 0.367960369) tdSql.checkData(5 , 2, None) - self.check_result_auto_sin( "select c1, c2, c3 , c4, c5 from ct1", "select sin(c1), sin(c2) ,sin(c3), sin(c4), sin(c5) from ct1") - + self.check_result_auto_sin( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select sin(c1), sin(c2) ,sin(c3), sin(c4), sin(c5) from {dbname}.ct1") + # nest query for sin functions - tdSql.query("select c4 , sin(c4) ,sin(sin(c4)) , sin(sin(sin(c4))) from ct1;") + tdSql.query(f"select c4 , sin(c4) ,sin(sin(c4)) , sin(sin(sin(c4))) from {dbname}.ct1;") tdSql.checkData(0 , 0 , 88) tdSql.checkData(0 , 1 , 0.035398303) tdSql.checkData(0 , 2 , 0.035390911) @@ -281,52 +269,52 @@ class TDTestCase: tdSql.checkData(11 , 2 , 0.841042171) tdSql.checkData(11 , 3 , 0.745338326) - # used for stable table - - tdSql.query("select sin(c1) from stb1") + # used for stable table + + tdSql.query(f"select sin(c1) from {dbname}.stb1") tdSql.checkRows(25) - + # used for not exists table - tdSql.error("select sin(c1) from stbbb1") - tdSql.error("select sin(c1) from tbname") - tdSql.error("select sin(c1) from ct5") + tdSql.error(f"select sin(c1) from {dbname}.stbbb1") + tdSql.error(f"select sin(c1) from {dbname}.tbname") + tdSql.error(f"select sin(c1) from {dbname}.ct5") + + # mix with common col + tdSql.query(f"select c1, sin(c1) from {dbname}.ct1") + tdSql.query(f"select c2, sin(c2) from {dbname}.ct4") - # mix with common col - tdSql.query("select c1, sin(c1) from ct1") - tdSql.query("select c2, sin(c2) from ct4") - # mix with common functions - tdSql.query("select c1, sin(c1),sin(c1), sin(sin(c1)) from ct4 ") + tdSql.query(f"select c1, sin(c1),sin(c1), sin(sin(c1)) from {dbname}.ct4 ") tdSql.checkData(0 , 0 ,None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(0 , 2 ,None) tdSql.checkData(0 , 3 ,None) - + tdSql.checkData(3 , 0 , 6) tdSql.checkData(3 , 1 ,-0.279415498) tdSql.checkData(3 , 2 ,-0.279415498) tdSql.checkData(3 , 3 ,-0.275793863) - tdSql.query("select c1, sin(c1),c5, floor(c5) from stb1 ") + tdSql.query(f"select c1, sin(c1),c5, floor(c5) from {dbname}.stb1 ") # # mix with agg functions , not support - tdSql.error("select c1, sin(c1),c5, count(c5) from stb1 ") - tdSql.error("select c1, sin(c1),c5, count(c5) from ct1 ") - tdSql.error("select sin(c1), count(c5) from stb1 ") - tdSql.error("select sin(c1), count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from stb1 ") + tdSql.error(f"select c1, sin(c1),c5, count(c5) from {dbname}.stb1 ") + tdSql.error(f"select c1, sin(c1),c5, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select sin(c1), count(c5) from {dbname}.stb1 ") + tdSql.error(f"select sin(c1), count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ") # agg functions mix with agg functions - tdSql.query("select max(c5), count(c5) from stb1") - tdSql.query("select max(c5), count(c5) from ct1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1") - - # # bug fix for compute - tdSql.query("select c1, sin(c1) -0 ,sin(c1-4)-0 from ct4 ") + + # # bug fix for compute + tdSql.query(f"select c1, sin(c1) -0 ,sin(c1-4)-0 from {dbname}.ct4 ") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -334,7 +322,7 @@ class TDTestCase: tdSql.checkData(1, 1, 0.989358247) tdSql.checkData(1, 2, -0.756802495) - tdSql.query(" select c1, sin(c1) -0 ,sin(c1-0.1)-0.1 from ct4") + tdSql.query(f"select c1, sin(c1) -0 ,sin(c1-0.1)-0.1 from {dbname}.ct4") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -342,35 +330,34 @@ class TDTestCase: tdSql.checkData(1, 1, 0.989358247) tdSql.checkData(1, 2, 0.898941342) - tdSql.query("select c1, sin(c1), c2, sin(c2), c3, sin(c3) from ct1") + tdSql.query(f"select c1, sin(c1), c2, sin(c2), c3, sin(c3) from {dbname}.ct1") - def test_big_number(self): + def test_big_number(self, dbname="db"): - tdSql.query("select c1, sin(100000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, sin(100000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(4, 1, math.sin(100000000)) - tdSql.query("select c1, sin(10000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, sin(10000000000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(4, 1, math.sin(10000000000000)) - tdSql.query("select c1, sin(10000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, sin(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, sin(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, sin(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(1, 1, math.sin(10000000000000000000000000.0)) - tdSql.query("select c1, sin(10000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, sin(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, sin(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, sin(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(4, 1, math.sin(10000000000000000000000000000000000.0)) - tdSql.query("select c1, sin(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, sin(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(4, 1, math.sin(10000000000000000000000000000000000000000.0)) - tdSql.query("select c1, sin(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow - def abs_func_filter(self): - tdSql.execute("use db") - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from ct4 where c1>5 ") + def abs_func_filter(self, dbname="db"): + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1>5 ") tdSql.checkRows(3) tdSql.checkData(0,0,8) tdSql.checkData(0,1,8.000000000) @@ -378,7 +365,7 @@ class TDTestCase: tdSql.checkData(0,3,7.900000000) tdSql.checkData(0,4,1.000000000) - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -386,7 +373,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,-1.000000000) - tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from ct4 where c1=sin(c1) limit 1 ") + tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1=sin(c1) limit 1 ") tdSql.checkRows(1) tdSql.checkData(0,0,0) tdSql.checkData(0,1,0) @@ -394,45 +381,40 @@ class TDTestCase: tdSql.checkData(0,3,0.000000000) tdSql.checkData(0,4,-0.100000000) tdSql.checkData(0,5,0.000000000) - - def pow_Arithmetic(self): - pass - - def check_boundary_values(self): + + def check_boundary_values(self, dbname="testdb"): PI=3.1415926 - tdSql.execute("drop database if exists bound_test") - tdSql.execute("create database if not exists bound_test") - time.sleep(3) - tdSql.execute("use bound_test") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname}") tdSql.execute( - "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" ) - tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( - f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.error( - f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - self.check_result_auto_sin( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from sub1_bound") - - self.check_result_auto_sin( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select sin(c1), sin(c2) ,sin(c3), sin(c3), sin(c2) ,sin(c1) from sub1_bound") + self.check_result_auto_sin( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.sub1_bound ", f"select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)) from {dbname}.sub1_bound") + + self.check_result_auto_sin( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select sin(c1), sin(c2) ,sin(c3), sin(c3), sin(c2) ,sin(c1) from {dbname}.sub1_bound") + + self.check_result_auto_sin(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select sin(abs(c1)) from {dbname}.sub1_bound" ) - self.check_result_auto_sin("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select sin(abs(c1)) from sub1_bound" ) - # check basic elem for table per row - tdSql.query("select sin(abs(c1)) ,sin(abs(c2)) , sin(abs(c3)) , sin(abs(c4)), sin(abs(c5)), sin(abs(c6)) from sub1_bound ") + tdSql.query(f"select sin(abs(c1)) ,sin(abs(c2)) , sin(abs(c3)) , sin(abs(c4)), sin(abs(c5)), sin(abs(c6)) from {dbname}.sub1_bound ") tdSql.checkData(0,0,math.sin(2147483647)) tdSql.checkData(0,1,math.sin(9223372036854775807)) tdSql.checkData(0,2,math.sin(32767)) @@ -450,83 +432,79 @@ class TDTestCase: tdSql.checkData(3,4,math.sin(339999995214436424907732413799364296704.00000)) # check + - * / in functions - tdSql.query("select sin(abs(c1+1)) ,sin(abs(c2)) , sin(abs(c3*1)) , sin(abs(c4/2)), sin(abs(c5))/2, sin(abs(c6)) from sub1_bound ") + tdSql.query(f"select sin(abs(c1+1)) ,sin(abs(c2)) , sin(abs(c3*1)) , sin(abs(c4/2)), sin(abs(c5))/2, sin(abs(c6)) from {dbname}.sub1_bound ") tdSql.checkData(0,0,math.sin(2147483648.000000000)) tdSql.checkData(0,1,math.sin(9223372036854775807)) tdSql.checkData(0,2,math.sin(32767.000000000)) tdSql.checkData(0,3,math.sin(63.500000000)) - tdSql.execute("create stable st (ts timestamp, num1 float, num2 double) tags (t1 int);") - tdSql.execute(f'create table tb1 using st tags (1)') - tdSql.execute(f'create table tb2 using st tags (2)') - tdSql.execute(f'create table tb3 using st tags (3)') - tdSql.execute('insert into tb1 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 )) - tdSql.execute('insert into tb1 values (now()-30s, {}, {})'.format(PI ,PI )) - tdSql.execute('insert into tb1 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5)) - tdSql.execute('insert into tb1 values (now()-10s, {}, {})'.format(PI*2 ,PI*2)) - tdSql.execute('insert into tb1 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5)) - - tdSql.execute('insert into tb2 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 )) - tdSql.execute('insert into tb2 values (now()-30s, {}, {})'.format(PI ,PI )) - tdSql.execute('insert into tb2 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5)) - tdSql.execute('insert into tb2 values (now()-10s, {}, {})'.format(PI*2 ,PI*2)) - tdSql.execute('insert into tb2 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5)) - - for i in range(100): - tdSql.execute('insert into tb3 values (now()+{}s, {}, {})'.format(i,PI*(5+i)/2 ,PI*(5+i)/2)) - - self.check_result_auto_sin("select num1,num2 from tb3;" , "select sin(num1),sin(num2) from tb3") - - def support_super_table_test(self): - tdSql.execute(" use db ") - self.check_result_auto_sin( " select c5 from stb1 order by ts " , "select sin(c5) from stb1 order by ts" ) - self.check_result_auto_sin( " select c5 from stb1 order by tbname " , "select sin(c5) from stb1 order by tbname" ) - self.check_result_auto_sin( " select c5 from stb1 where c1 > 0 order by tbname " , "select sin(c5) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_sin( " select c5 from stb1 where c1 > 0 order by tbname " , "select sin(c5) from stb1 where c1 > 0 order by tbname" ) - - self.check_result_auto_sin( " select t1,c5 from stb1 order by ts " , "select sin(t1), sin(c5) from stb1 order by ts" ) - self.check_result_auto_sin( " select t1,c5 from stb1 order by tbname " , "select sin(t1) ,sin(c5) from stb1 order by tbname" ) - self.check_result_auto_sin( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sin(t1) ,sin(c5) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_sin( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sin(t1) , sin(c5) from stb1 where c1 > 0 order by tbname" ) - pass - + tdSql.execute(f"create stable {dbname}.st (ts timestamp, num1 float, num2 double) tags (t1 int);") + tdSql.execute(f'create table {dbname}.tb1 using {dbname}.st tags (1)') + tdSql.execute(f'create table {dbname}.tb2 using {dbname}.st tags (2)') + tdSql.execute(f'create table {dbname}.tb3 using {dbname}.st tags (3)') + tdSql.execute(f'insert into {dbname}.tb1 values (now()-40s, {PI/2}, {PI/2})') + tdSql.execute(f'insert into {dbname}.tb1 values (now()-30s, {PI}, {PI})') + tdSql.execute(f'insert into {dbname}.tb1 values (now()-20s, {PI*1.5}, {PI*1.5})') + tdSql.execute(f'insert into {dbname}.tb1 values (now()-10s, {PI*2}, {PI*2})') + tdSql.execute(f'insert into {dbname}.tb1 values (now(), {PI*2.5}, {PI*2.5})') + + tdSql.execute(f'insert into {dbname}.tb2 values (now()-40s, {PI/2}, {PI/2})') + tdSql.execute(f'insert into {dbname}.tb2 values (now()-30s, {PI}, {PI})') + tdSql.execute(f'insert into {dbname}.tb2 values (now()-20s, {PI*1.5}, {PI*1.5})') + tdSql.execute(f'insert into {dbname}.tb2 values (now()-10s, {PI*2}, {PI*2})') + tdSql.execute(f'insert into {dbname}.tb2 values (now(), {PI*2.5}, {PI*2.5})') + + self.check_result_auto_sin(f"select num1,num2 from {dbname}.tb3;" , f"select sin(num1),sin(num2) from {dbname}.tb3") + + def support_super_table_test(self, dbname="db"): + self.check_result_auto_sin( f"select c5 from {dbname}.stb1 order by ts " , f"select sin(c5) from {dbname}.stb1 order by ts" ) + self.check_result_auto_sin( f"select c5 from {dbname}.stb1 order by tbname " , f"select sin(c5) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_sin( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_sin( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + + self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select sin(t1), sin(c5) from {dbname}.stb1 order by ts" ) + self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select sin(t1) ,sin(c5) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(t1) ,sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(t1) , sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() tdLog.printNoPrefix("==========step1:create table ==============") - + self.prepare_datas() - tdLog.printNoPrefix("==========step2:test errors ==============") + tdLog.printNoPrefix("==========step2:test errors ==============") self.test_errors() - - tdLog.printNoPrefix("==========step3:support types ============") + + tdLog.printNoPrefix("==========step3:support types ============") self.support_types() - tdLog.printNoPrefix("==========step4: sin basic query ============") + tdLog.printNoPrefix("==========step4: sin basic query ============") self.basic_sin_function() - tdLog.printNoPrefix("==========step5: big number sin query ============") - - self.test_big_number() + tdLog.printNoPrefix("==========step5: sin filter query ============") + self.abs_func_filter() - tdLog.printNoPrefix("==========step6: sin boundary query ============") + tdLog.printNoPrefix("==========step6: big number sin query ============") - self.check_boundary_values() + self.test_big_number() + - tdLog.printNoPrefix("==========step7: sin filter query ============") + tdLog.printNoPrefix("==========step7: sin boundary query ============") - self.abs_func_filter() + self.check_boundary_values() + tdLog.printNoPrefix("==========step8: check sin result of stable query ============") self.support_super_table_test() - + def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/smaTest.py b/tests/system-test/2-query/smaTest.py index 67824cc3a3c372821c5014d48f6d2dbbc9ee9066..0217b6c28c2f44deb87c032957ef749fc329742e 100644 --- a/tests/system-test/2-query/smaTest.py +++ b/tests/system-test/2-query/smaTest.py @@ -30,14 +30,6 @@ class TDTestCase: # updatecfgDict = {'debugFlag': 135} # updatecfgDict = {'fqdn': 135} - def caseDescription(self): - ''' - limit and offset keyword function test cases; - case1: limit offset base function test - case2: offset return valid - ''' - return - # init def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) @@ -47,11 +39,12 @@ class TDTestCase: self.ts = 1500000000000 - # run case + # run case def run(self): # insert data - self.insert_data1("t1", self.ts, 1000*10000) - self.insert_data1("t4", self.ts, 1000*10000) + dbname = "db" + self.insert_data1(f"{dbname}.t1", self.ts, 1000*10000) + self.insert_data1(f"{dbname}.t4", self.ts, 1000*10000) # test base case # self.test_case1() tdLog.debug(" LIMIT test_case1 ............ [OK]") @@ -60,7 +53,7 @@ class TDTestCase: tdLog.debug(" LIMIT test_case2 ............ [OK]") - # stop + # stop def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) @@ -70,16 +63,16 @@ class TDTestCase: # # create table - def create_tables(self): + def create_tables(self, dbname="db"): # super table - tdSql.execute("create table st(ts timestamp, i1 int,i2 int) tags(area int)"); + tdSql.execute(f"create table {dbname}.st(ts timestamp, i1 int,i2 int) tags(area int)") # child table - tdSql.execute("create table t1 using st tags(1)"); + tdSql.execute(f"create table {dbname}.t1 using {dbname}.st tags(1)") - tdSql.execute("create table st1(ts timestamp, i1 int ,i2 int) tags(area int) sma(i2) "); - tdSql.execute("create table t4 using st1 tags(1)"); + tdSql.execute(f"create table {dbname}.st1(ts timestamp, i1 int ,i2 int) tags(area int) sma(i2) ") + tdSql.execute(f"create table {dbname}.t4 using {dbname}.st1 tags(1)") - return + return # insert data1 def insert_data(self, tbname, ts_start, count): @@ -91,7 +84,7 @@ class TDTestCase: if i >0 and i%30000 == 0: tdSql.execute(sql) sql = pre_insert - # end sql + # end sql if sql != pre_insert: tdSql.execute(sql) @@ -107,16 +100,16 @@ class TDTestCase: if i >0 and i%30000 == 0: tdSql.execute(sql) sql = pre_insert - # end sql + # end sql if sql != pre_insert: tdSql.execute(sql) tdLog.debug("INSERT TABLE DATA ............ [OK]") return - # test case1 base + # test case1 base # def test_case1(self): - # # + # # # # limit base function # # # # base no where diff --git a/tests/system-test/2-query/sml.py b/tests/system-test/2-query/sml.py index 6cfb9a1dada47949f68a6a79ef05c7c4113a0a2f..b7e167c8b5a6b7db3421f00dcb5da8fff530c8d6 100644 --- a/tests/system-test/2-query/sml.py +++ b/tests/system-test/2-query/sml.py @@ -20,7 +20,7 @@ class TDTestCase: tdSql.init(conn.cursor()) #tdSql.init(conn.cursor(), logSql) # output sql.txt file - def checkFileContent(self): + def checkFileContent(self, dbname="sml_db"): buildPath = tdCom.getBuildPath() cmdStr = '%s/build/bin/sml_test'%(buildPath) tdLog.info(cmdStr) @@ -28,8 +28,8 @@ class TDTestCase: if ret != 0: tdLog.exit("sml_test failed") - tdSql.execute('use sml_db') - tdSql.query("select * from t_b7d815c9222ca64cdf2614c61de8f211") + # tdSql.execute('use sml_db') + tdSql.query(f"select * from {dbname}.t_b7d815c9222ca64cdf2614c61de8f211") tdSql.checkRows(1) tdSql.checkData(0, 0, '2016-01-01 08:00:07.000') @@ -44,35 +44,35 @@ class TDTestCase: tdSql.checkData(0, 9, 0) tdSql.checkData(0, 10, 25) - tdSql.query("select * from readings") + tdSql.query(f"select * from {dbname}.readings") tdSql.checkRows(9) - tdSql.query("select distinct tbname from readings") + tdSql.query(f"select distinct tbname from {dbname}.readings") tdSql.checkRows(4) - tdSql.query("select * from t_0799064f5487946e5d22164a822acfc8 order by _ts") + tdSql.query(f"select * from {dbname}.t_0799064f5487946e5d22164a822acfc8 order by _ts") tdSql.checkRows(2) tdSql.checkData(0, 3, "kk") tdSql.checkData(1, 3, None) - tdSql.query("select distinct tbname from `sys.if.bytes.out`") + tdSql.query(f"select distinct tbname from {dbname}.`sys.if.bytes.out`") tdSql.checkRows(2) - tdSql.query("select * from t_fc70dec6677d4277c5d9799c4da806da order by _ts") + tdSql.query(f"select * from {dbname}.t_fc70dec6677d4277c5d9799c4da806da order by _ts") tdSql.checkRows(2) tdSql.checkData(0, 1, 1.300000000) tdSql.checkData(1, 1,13.000000000) - tdSql.query("select * from `sys.procs.running`") + tdSql.query(f"select * from {dbname}.`sys.procs.running`") tdSql.checkRows(1) tdSql.checkData(0, 1, 42.000000000) tdSql.checkData(0, 2, "web01") - tdSql.query("select distinct tbname from `sys.cpu.nice`") + tdSql.query(f"select distinct tbname from {dbname}.`sys.cpu.nice`") tdSql.checkRows(2) - tdSql.query("select * from `sys.cpu.nice` order by _ts") + tdSql.query(f"select * from {dbname}.`sys.cpu.nice` order by _ts") tdSql.checkRows(2) tdSql.checkData(0, 1, 9.000000000) tdSql.checkData(0, 2, "lga") @@ -83,8 +83,11 @@ class TDTestCase: tdSql.checkData(1, 3, "web01") tdSql.checkData(1, 4, "t1") - tdSql.query("select * from macylr") + tdSql.query(f"select * from {dbname}.macylr") tdSql.checkRows(2) + + tdSql.query(f"desc {dbname}.macylr") + tdSql.checkRows(25) return def run(self): diff --git a/tests/system-test/2-query/spread.py b/tests/system-test/2-query/spread.py index 51c569e56567fc7fdf1e2399008eaca5acc4059d..ffe86ff36304224e2d5f776f5088a16b445a5231 100644 --- a/tests/system-test/2-query/spread.py +++ b/tests/system-test/2-query/spread.py @@ -26,6 +26,8 @@ TS_TYPE_COL = [ TS_COL, ] ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ] +DBNAME = "db" + class TDTestCase: def init(self, conn, logSql): @@ -88,6 +90,7 @@ class TDTestCase: return join_condition def __where_condition(self, col=None, tbname=None, query_conditon=None): + # tbname = tbname.split(".")[-1] if tbname else None if query_conditon and isinstance(query_conditon, str): if query_conditon.startswith("count"): query_conditon = query_conditon[6:-1] @@ -129,32 +132,33 @@ class TDTestCase: return f"select spread({select_clause}) from {from_clause} {where_condition} {group_condition}" @property - def __tb_list(self): + def __tb_list(self, dbname=DBNAME): return [ - "ct1", - "ct4", - "t1", - "ct2", - "stb1", + f"{dbname}.ct1", + f"{dbname}.ct4", + f"{dbname}.t1", + f"{dbname}.ct2", + f"{dbname}.stb1", ] def sql_list(self): sqls = [] __no_join_tblist = self.__tb_list for tb in __no_join_tblist: - select_claus_list = self.__query_condition(tb) - for select_claus in select_claus_list: - group_claus = self.__group_condition(col=select_claus) - where_claus = self.__where_condition(query_conditon=select_claus) - having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null") - sqls.extend( - ( - self.__single_sql(select_claus, tb, where_claus, having_claus), - self.__single_sql(select_claus, tb,), - self.__single_sql(select_claus, tb, where_condition=where_claus), - self.__single_sql(select_claus, tb, group_condition=group_claus), - ) + tbname = tb.split(".")[-1] + select_claus_list = self.__query_condition(tbname) + for select_claus in select_claus_list: + group_claus = self.__group_condition(col=select_claus) + where_claus = self.__where_condition(query_conditon=select_claus) + having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null") + sqls.extend( + ( + self.__single_sql(select_claus, tb, where_claus, having_claus), + self.__single_sql(select_claus, tb,), + self.__single_sql(select_claus, tb, where_condition=where_claus), + self.__single_sql(select_claus, tb, group_condition=group_claus), ) + ) # return filter(None, sqls) return list(filter(None, sqls)) @@ -166,28 +170,28 @@ class TDTestCase: tdLog.info(f"sql: {sqls[i]}") tdSql.query(sqls[i]) - def __test_current(self): - tdSql.query("select spread(ts) from ct1") + def __test_current(self, dbname=DBNAME): + tdSql.query(f"select spread(ts) from {dbname}.ct1") tdSql.checkRows(1) - tdSql.query("select spread(c1) from ct2") + tdSql.query(f"select spread(c1) from {dbname}.ct2") tdSql.checkRows(1) - tdSql.query("select spread(c1) from ct4 group by c1") + tdSql.query(f"select spread(c1) from {dbname}.ct4 group by c1") tdSql.checkRows(self.rows + 3) - tdSql.query("select spread(c1) from ct4 group by c7") + tdSql.query(f"select spread(c1) from {dbname}.ct4 group by c7") tdSql.checkRows(3) - tdSql.query("select spread(ct2.c1) from ct4 join ct2 on ct4.ts=ct2.ts") + tdSql.query(f"select spread(ct2.c1) from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts") tdSql.checkRows(1) self.spread_check() - def __test_error(self): + def __test_error(self, dbname=DBNAME): tdLog.printNoPrefix("===step 0: err case, must return err") - tdSql.error( "select spread() from ct1" ) - tdSql.error( "select spread(1, 2) from ct2" ) - tdSql.error( f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from ct4" ) - tdSql.error( f"select spread({BOOLEAN_COL[0]}) from t1" ) - tdSql.error( f"select spread({CHAR_COL[0]}) from stb1" ) + tdSql.error( f"select spread() from {dbname}.ct1" ) + tdSql.error( f"select spread(1, 2) from {dbname}.ct2" ) + tdSql.error( f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from {dbname}.ct4" ) + tdSql.error( f"select spread({BOOLEAN_COL[0]}) from {dbname}.t1" ) + tdSql.error( f"select spread({CHAR_COL[0]}) from {dbname}.stb1" ) # tdSql.error( ''' select spread(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']) # from ct1 @@ -196,20 +200,20 @@ class TDTestCase: # having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' ) # tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ") - def all_test(self): - self.__test_error() - self.__test_current() + def all_test(self, dbname=DBNAME): + self.__test_error(dbname) + self.__test_current(dbname) - def __create_tb(self): + def __create_tb(self, dbname=DBNAME): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp ) tags (t1 int) ''' - create_ntb_sql = f'''create table t1( + create_ntb_sql = f'''create table {dbname}.t1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -219,30 +223,30 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2} - def __insert_data(self, rows): + def __insert_data(self, rows, dbname=DBNAME): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into ct1 values + f'''insert into {dbname}.ct1 values ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -258,7 +262,7 @@ class TDTestCase: ) tdSql.execute( - f'''insert into ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -274,13 +278,13 @@ class TDTestCase: ) for i in range(rows): - insert_data = f'''insert into t1 values + insert_data = f'''insert into {dbname}.t1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) diff --git a/tests/system-test/2-query/sqrt.py b/tests/system-test/2-query/sqrt.py index 425d59f1186615467f4aac8a085949029422b760..9597375885cf5fdedf1d52a547d7558430cb46e4 100644 --- a/tests/system-test/2-query/sqrt.py +++ b/tests/system-test/2-query/sqrt.py @@ -9,48 +9,46 @@ from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} + def init(self, conn, powSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - def prepare_datas(self): + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -85,84 +83,74 @@ class TDTestCase: row_check.append(elem) auto_result.append(row_check) - check_status = True - + tdSql.query(pow_query) for row_index , row in enumerate(pow_result): for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): - check_status = False - elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - check_status = False - else: - pass - if not check_status: - tdLog.notice("sqrt function value has not as expected , sql is \"%s\" "%pow_query ) - sys.exit(1) - else: - tdLog.info("sqrt value check pass , it work as expected ,sql is \"%s\" "%pow_query ) - - def test_errors(self): + tdSql.checkData(row_index ,col_index ,auto_result[row_index][col_index]) + + + def test_errors(self, dbname="db"): error_sql_lists = [ - "select sqrt from t1", - # "select sqrt(-+--+c1 ) from t1", - # "select +-sqrt(c1) from t1", - # "select ++-sqrt(c1) from t1", - # "select ++--sqrt(c1) from t1", - # "select - -sqrt(c1)*0 from t1", - # "select sqrt(tbname+1) from t1 ", - "select sqrt(123--123)==1 from t1", - "select sqrt(c1) as 'd1' from t1", - "select sqrt(c1 ,c2) from t1", - "select sqrt(c1 ,NULL ) from t1", - "select sqrt(,) from t1;", - "select sqrt(sqrt(c1) ab from t1)", - "select sqrt(c1 ) as int from t1", - "select sqrt from stb1", - # "select sqrt(-+--+c1) from stb1", - # "select +-sqrt(c1) from stb1", - # "select ++-sqrt(c1) from stb1", - # "select ++--sqrt(c1) from stb1", - # "select - -sqrt(c1)*0 from stb1", - # "select sqrt(tbname+1) from stb1 ", - "select sqrt(123--123)==1 from stb1", - "select sqrt(c1) as 'd1' from stb1", - "select sqrt(c1 ,c2 ) from stb1", - "select sqrt(c1 ,NULL) from stb1", - "select sqrt(,) from stb1;", - "select sqrt(sqrt(c1) ab from stb1)", - "select sqrt(c1) as int from stb1" + f"select sqrt from {dbname}.t1", + # f"select sqrt(-+--+c1 ) from {dbname}.t1", + # f"select +-sqrt(c1) from {dbname}.t1", + # f"select ++-sqrt(c1) from {dbname}.t1", + # f"select ++--sqrt(c1) from {dbname}.t1", + # f"select - -sqrt(c1)*0 from {dbname}.t1", + # f"select sqrt(tbname+1) from {dbname}.t1 ", + f"select sqrt(123--123)==1 from {dbname}.t1", + f"select sqrt(c1) as 'd1' from {dbname}.t1", + f"select sqrt(c1 ,c2) from {dbname}.t1", + f"select sqrt(c1 ,NULL ) from {dbname}.t1", + f"select sqrt(,) from {dbname}.t1;", + f"select sqrt(sqrt(c1) ab from {dbname}.t1)", + f"select sqrt(c1 ) as int from {dbname}.t1", + f"select sqrt from {dbname}.stb1", + # f"select sqrt(-+--+c1) from {dbname}.stb1", + # f"select +-sqrt(c1) from {dbname}.stb1", + # f"select ++-sqrt(c1) from {dbname}.stb1", + # f"select ++--sqrt(c1) from {dbname}.stb1", + # f"select - -sqrt(c1)*0 from {dbname}.stb1", + # f"select sqrt(tbname+1) from {dbname}.stb1 ", + f"select sqrt(123--123)==1 from {dbname}.stb1", + f"select sqrt(c1) as 'd1' from {dbname}.stb1", + f"select sqrt(c1 ,c2 ) from {dbname}.stb1", + f"select sqrt(c1 ,NULL) from {dbname}.stb1", + f"select sqrt(,) from {dbname}.stb1;", + f"select sqrt(sqrt(c1) ab from {dbname}.stb1)", + f"select sqrt(c1) as int from {dbname}.stb1" ] for error_sql in error_sql_lists: tdSql.error(error_sql) - def support_types(self): + def support_types(self, dbname="db"): type_error_sql_lists = [ - "select sqrt(ts) from t1" , - "select sqrt(c7) from t1", - "select sqrt(c8) from t1", - "select sqrt(c9) from t1", - "select sqrt(ts) from ct1" , - "select sqrt(c7) from ct1", - "select sqrt(c8) from ct1", - "select sqrt(c9) from ct1", - "select sqrt(ts) from ct3" , - "select sqrt(c7) from ct3", - "select sqrt(c8) from ct3", - "select sqrt(c9) from ct3", - "select sqrt(ts) from ct4" , - "select sqrt(c7) from ct4", - "select sqrt(c8) from ct4", - "select sqrt(c9) from ct4", - "select sqrt(ts) from stb1" , - "select sqrt(c7) from stb1", - "select sqrt(c8) from stb1", - "select sqrt(c9) from stb1" , - - "select sqrt(ts) from stbbb1" , - "select sqrt(c7) from stbbb1", - - "select sqrt(ts) from tbname", - "select sqrt(c9) from tbname" + f"select sqrt(ts) from {dbname}.t1" , + f"select sqrt(c7) from {dbname}.t1", + f"select sqrt(c8) from {dbname}.t1", + f"select sqrt(c9) from {dbname}.t1", + f"select sqrt(ts) from {dbname}.ct1" , + f"select sqrt(c7) from {dbname}.ct1", + f"select sqrt(c8) from {dbname}.ct1", + f"select sqrt(c9) from {dbname}.ct1", + f"select sqrt(ts) from {dbname}.ct3" , + f"select sqrt(c7) from {dbname}.ct3", + f"select sqrt(c8) from {dbname}.ct3", + f"select sqrt(c9) from {dbname}.ct3", + f"select sqrt(ts) from {dbname}.ct4" , + f"select sqrt(c7) from {dbname}.ct4", + f"select sqrt(c8) from {dbname}.ct4", + f"select sqrt(c9) from {dbname}.ct4", + f"select sqrt(ts) from {dbname}.stb1" , + f"select sqrt(c7) from {dbname}.stb1", + f"select sqrt(c8) from {dbname}.stb1", + f"select sqrt(c9) from {dbname}.stb1" , + + f"select sqrt(ts) from {dbname}.stbbb1" , + f"select sqrt(c7) from {dbname}.stbbb1", + + f"select sqrt(ts) from {dbname}.tbname", + f"select sqrt(c9) from {dbname}.tbname" ] @@ -171,103 +159,103 @@ class TDTestCase: type_sql_lists = [ - "select sqrt(c1) from t1", - "select sqrt(c2) from t1", - "select sqrt(c3) from t1", - "select sqrt(c4) from t1", - "select sqrt(c5) from t1", - "select sqrt(c6) from t1", - - "select sqrt(c1) from ct1", - "select sqrt(c2) from ct1", - "select sqrt(c3) from ct1", - "select sqrt(c4) from ct1", - "select sqrt(c5) from ct1", - "select sqrt(c6) from ct1", - - "select sqrt(c1) from ct3", - "select sqrt(c2) from ct3", - "select sqrt(c3) from ct3", - "select sqrt(c4) from ct3", - "select sqrt(c5) from ct3", - "select sqrt(c6) from ct3", - - "select sqrt(c1) from stb1", - "select sqrt(c2) from stb1", - "select sqrt(c3) from stb1", - "select sqrt(c4) from stb1", - "select sqrt(c5) from stb1", - "select sqrt(c6) from stb1", - - "select sqrt(c6) as alisb from stb1", - "select sqrt(c6) alisb from stb1", + f"select sqrt(c1) from {dbname}.t1", + f"select sqrt(c2) from {dbname}.t1", + f"select sqrt(c3) from {dbname}.t1", + f"select sqrt(c4) from {dbname}.t1", + f"select sqrt(c5) from {dbname}.t1", + f"select sqrt(c6) from {dbname}.t1", + + f"select sqrt(c1) from {dbname}.ct1", + f"select sqrt(c2) from {dbname}.ct1", + f"select sqrt(c3) from {dbname}.ct1", + f"select sqrt(c4) from {dbname}.ct1", + f"select sqrt(c5) from {dbname}.ct1", + f"select sqrt(c6) from {dbname}.ct1", + + f"select sqrt(c1) from {dbname}.ct3", + f"select sqrt(c2) from {dbname}.ct3", + f"select sqrt(c3) from {dbname}.ct3", + f"select sqrt(c4) from {dbname}.ct3", + f"select sqrt(c5) from {dbname}.ct3", + f"select sqrt(c6) from {dbname}.ct3", + + f"select sqrt(c1) from {dbname}.stb1", + f"select sqrt(c2) from {dbname}.stb1", + f"select sqrt(c3) from {dbname}.stb1", + f"select sqrt(c4) from {dbname}.stb1", + f"select sqrt(c5) from {dbname}.stb1", + f"select sqrt(c6) from {dbname}.stb1", + + f"select sqrt(c6) as alisb from {dbname}.stb1", + f"select sqrt(c6) alisb from {dbname}.stb1", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - def basic_sqrt_function(self): + def basic_sqrt_function(self, dbname="db"): # basic query - tdSql.query("select c1 from ct3") + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select sqrt(c1) from ct3") + tdSql.query(f"select sqrt(c1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sqrt(c2) from ct3") + tdSql.query(f"select sqrt(c2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sqrt(c3) from ct3") + tdSql.query(f"select sqrt(c3) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sqrt(c4) from ct3") + tdSql.query(f"select sqrt(c4) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sqrt(c5) from ct3") + tdSql.query(f"select sqrt(c5) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select sqrt(c6) from ct3") + tdSql.query(f"select sqrt(c6) from {dbname}.ct3") tdSql.checkRows(0) # # used for regular table - tdSql.query("select sqrt(c1) from t1") + tdSql.query(f"select sqrt(c1) from {dbname}.t1") tdSql.checkData(0, 0, None) tdSql.checkData(1 , 0, 1.000000000) tdSql.checkData(3 , 0, 1.732050808) tdSql.checkData(5 , 0, None) - tdSql.query("select c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 4, 1.11000) tdSql.checkData(3, 3, 33) tdSql.checkData(5, 4, None) - tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 5, 1.11000) tdSql.checkData(3, 4, 33) tdSql.checkData(5, 5, None) - self.check_result_auto_sqrt( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from t1") + self.check_result_auto_sqrt( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from {dbname}.t1") # used for sub table - tdSql.query("select c2 ,sqrt(c2) from ct1") + tdSql.query(f"select c2 ,sqrt(c2) from {dbname}.ct1") tdSql.checkData(0, 1, 298.140906284) tdSql.checkData(1 , 1, 278.885281074) tdSql.checkData(3 , 1, 235.701081881) tdSql.checkData(4 , 1, 0.000000000) - tdSql.query("select c1, c5 ,sqrt(c5) from ct4") + tdSql.query(f"select c1, c5 ,sqrt(c5) from {dbname}.ct4") tdSql.checkData(0 , 2, None) tdSql.checkData(1 , 2, 2.979932904) tdSql.checkData(2 , 2, 2.787471970) tdSql.checkData(3 , 2, 2.580697551) tdSql.checkData(5 , 2, None) - self.check_result_auto_sqrt( "select c1, c2, c3 , c4, c5 from ct1", "select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c4), sqrt(c5) from ct1") + self.check_result_auto_sqrt( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c4), sqrt(c5) from {dbname}.ct1") # nest query for sqrt functions - tdSql.query("select c4 , sqrt(c4) ,sqrt(sqrt(c4)) , sqrt(sqrt(sqrt(c4))) from ct1;") + tdSql.query(f"select c4 , sqrt(c4) ,sqrt(sqrt(c4)) , sqrt(sqrt(sqrt(c4))) from {dbname}.ct1;") tdSql.checkData(0 , 0 , 88) tdSql.checkData(0 , 1 , 9.380831520) tdSql.checkData(0 , 2 , 3.062814314) @@ -285,22 +273,22 @@ class TDTestCase: # used for stable table - tdSql.query("select sqrt(c1) from stb1") + tdSql.query(f"select sqrt(c1) from {dbname}.stb1") tdSql.checkRows(25) # used for not exists table - tdSql.error("select sqrt(c1) from stbbb1") - tdSql.error("select sqrt(c1) from tbname") - tdSql.error("select sqrt(c1) from ct5") + tdSql.error(f"select sqrt(c1) from {dbname}.stbbb1") + tdSql.error(f"select sqrt(c1) from {dbname}.tbname") + tdSql.error(f"select sqrt(c1) from {dbname}.ct5") # mix with common col - tdSql.query("select c1, sqrt(c1) from ct1") + tdSql.query(f"select c1, sqrt(c1) from {dbname}.ct1") tdSql.checkData(0 , 0 ,8) tdSql.checkData(0 , 1 ,2.828427125) tdSql.checkData(4 , 0 ,0) tdSql.checkData(4 , 1 ,0.000000000) - tdSql.query("select c2, sqrt(c2) from ct4") + tdSql.query(f"select c2, sqrt(c2) from {dbname}.ct4") tdSql.checkData(0 , 0 , None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(4 , 0 ,55555) @@ -309,7 +297,7 @@ class TDTestCase: tdSql.checkData(5 , 1 ,None) # mix with common functions - tdSql.query("select c1, sqrt(c1),sqrt(c1), sqrt(sqrt(c1)) from ct4 ") + tdSql.query(f"select c1, sqrt(c1),sqrt(c1), sqrt(sqrt(c1)) from {dbname}.ct4 ") tdSql.checkData(0 , 0 ,None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(0 , 2 ,None) @@ -320,34 +308,34 @@ class TDTestCase: tdSql.checkData(3 , 2 ,2.449489743) tdSql.checkData(3 , 3 ,1.565084580) - tdSql.query("select c1, sqrt(c1),c5, floor(c5) from stb1 ") + tdSql.query(f"select c1, sqrt(c1),c5, floor(c5) from {dbname}.stb1 ") # # mix with agg functions , not support - tdSql.error("select c1, sqrt(c1),c5, count(c5) from stb1 ") - tdSql.error("select c1, sqrt(c1),c5, count(c5) from ct1 ") - tdSql.error("select sqrt(c1), count(c5) from stb1 ") - tdSql.error("select sqrt(c1), count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from stb1 ") + tdSql.error(f"select c1, sqrt(c1),c5, count(c5) from {dbname}.stb1 ") + tdSql.error(f"select c1, sqrt(c1),c5, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select sqrt(c1), count(c5) from {dbname}.stb1 ") + tdSql.error(f"select sqrt(c1), count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ") # agg functions mix with agg functions - tdSql.query("select max(c5), count(c5) from stb1") - tdSql.query("select max(c5), count(c5) from ct1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1") # bug fix for count - tdSql.query("select count(c1) from ct4 ") + tdSql.query(f"select count(c1) from {dbname}.ct4 ") tdSql.checkData(0,0,9) - tdSql.query("select count(*) from ct4 ") + tdSql.query(f"select count(*) from {dbname}.ct4 ") tdSql.checkData(0,0,12) - tdSql.query("select count(c1) from stb1 ") + tdSql.query(f"select count(c1) from {dbname}.stb1 ") tdSql.checkData(0,0,22) - tdSql.query("select count(*) from stb1 ") + tdSql.query(f"select count(*) from {dbname}.stb1 ") tdSql.checkData(0,0,25) # # bug fix for compute - tdSql.query("select c1, sqrt(c1) -0 ,sqrt(c1-4)-0 from ct4 ") + tdSql.query(f"select c1, sqrt(c1) -0 ,sqrt(c1-4)-0 from {dbname}.ct4 ") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -355,7 +343,7 @@ class TDTestCase: tdSql.checkData(1, 1, 2.828427125) tdSql.checkData(1, 2, 2.000000000) - tdSql.query(" select c1, sqrt(c1) -0 ,sqrt(c1-0.1)-0.1 from ct4") + tdSql.query(f"select c1, sqrt(c1) -0 ,sqrt(c1-0.1)-0.1 from {dbname}.ct4") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -363,57 +351,56 @@ class TDTestCase: tdSql.checkData(1, 1, 2.828427125) tdSql.checkData(1, 2, 2.710693865) - tdSql.query("select c1, sqrt(c1), c2, sqrt(c2), c3, sqrt(c3) from ct1") + tdSql.query(f"select c1, sqrt(c1), c2, sqrt(c2), c3, sqrt(c3) from {dbname}.ct1") - def test_big_number(self): + def test_big_number(self, dbname="db"): - tdSql.query("select c1, sqrt(100000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, sqrt(100000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(4, 1, 10000.000000000) - tdSql.query("select c1, sqrt(10000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, sqrt(10000000000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(4, 1, 3162277.660168380) - tdSql.query("select c1, sqrt(c1) + sqrt(10000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, sqrt(c1) + sqrt(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, sqrt(c1) + sqrt(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, sqrt(c1) + sqrt(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(1, 1, 3162277660171.025390625) - tdSql.query("select c1, sqrt(10000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, sqrt(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(4, 1, 100000000000000000.000000000) - tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(4, 1, 100000000000000000000.000000000) - tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow - def pow_base_test(self): + def pow_base_test(self, dbname="db"): # base is an regular number ,int or double - tdSql.query("select c1, sqrt(c1) from ct1") + tdSql.query(f"select c1, sqrt(c1) from {dbname}.ct1") tdSql.checkData(0, 1,2.828427125) tdSql.checkRows(13) # # bug for compute in functions - # tdSql.query("select c1, abs(1/0) from ct1") + # tdSql.query(f"select c1, abs(1/0) from {dbname}.ct1") # tdSql.checkData(0, 0, 8) # tdSql.checkData(0, 1, 1) - tdSql.query("select c1, sqrt(1) from ct1") + tdSql.query(f"select c1, sqrt(1) from {dbname}.ct1") tdSql.checkData(0, 1, 1.000000000) tdSql.checkRows(13) # two cols start sqrt(x,y) - tdSql.query("select c1,c2, sqrt(c2) from ct1") + tdSql.query(f"select c1,c2, sqrt(c2) from {dbname}.ct1") tdSql.checkData(0, 2, 298.140906284) tdSql.checkData(1, 2, 278.885281074) tdSql.checkData(4, 2, 0.000000000) - def abs_func_filter(self): - tdSql.execute("use db") - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1>5 ") + def abs_func_filter(self, dbname="db"): + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1>5 ") tdSql.checkRows(3) tdSql.checkData(0,0,8) tdSql.checkData(0,1,8.000000000) @@ -421,7 +408,7 @@ class TDTestCase: tdSql.checkData(0,3,7.900000000) tdSql.checkData(0,4,3.000000000) - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -429,7 +416,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,2.000000000) - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -437,7 +424,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,2.000000000) - tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=sqrt(c1) limit 1 ") + tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1=sqrt(c1) limit 1 ") tdSql.checkRows(1) tdSql.checkData(0,0,1) tdSql.checkData(0,1,11111) @@ -446,42 +433,37 @@ class TDTestCase: tdSql.checkData(0,4,0.900000000) tdSql.checkData(0,5,1.000000000) - def pow_Arithmetic(self): - pass - - def check_boundary_values(self): + def check_boundary_values(self, dbname="bound_test"): - tdSql.execute("drop database if exists bound_test") - tdSql.execute("create database if not exists bound_test") - time.sleep(3) - tdSql.execute("use bound_test") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname}") tdSql.execute( - "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" ) - tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( - f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.error( - f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - self.check_result_auto_sqrt( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from sub1_bound") + self.check_result_auto_sqrt( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from {dbname}.sub1_bound") - self.check_result_auto_sqrt( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c3), sqrt(c2) ,sqrt(c1) from sub1_bound") + self.check_result_auto_sqrt( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c3), sqrt(c2) ,sqrt(c1) from {dbname}.sub1_bound") - self.check_result_auto_sqrt("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select sqrt(abs(c1)) from sub1_bound" ) + self.check_result_auto_sqrt(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select sqrt(abs(c1)) from {dbname}.sub1_bound" ) # check basic elem for table per row - tdSql.query("select sqrt(abs(c1)) ,sqrt(abs(c2)) , sqrt(abs(c3)) , sqrt(abs(c4)), sqrt(abs(c5)), sqrt(abs(c6)) from sub1_bound ") + tdSql.query(f"select sqrt(abs(c1)) ,sqrt(abs(c2)) , sqrt(abs(c3)) , sqrt(abs(c4)), sqrt(abs(c5)), sqrt(abs(c6)) from {dbname}.sub1_bound ") tdSql.checkData(0,0,math.sqrt(2147483647)) tdSql.checkData(0,1,math.sqrt(9223372036854775807)) tdSql.checkData(0,2,math.sqrt(32767)) @@ -499,23 +481,22 @@ class TDTestCase: tdSql.checkData(3,4,math.sqrt(339999995214436424907732413799364296704.00000)) # check + - * / in functions - tdSql.query("select sqrt(abs(c1+1)) ,sqrt(abs(c2)) , sqrt(abs(c3*1)) , sqrt(abs(c4/2)), sqrt(abs(c5))/2, sqrt(abs(c6)) from sub1_bound ") + tdSql.query(f"select sqrt(abs(c1+1)) ,sqrt(abs(c2)) , sqrt(abs(c3*1)) , sqrt(abs(c4/2)), sqrt(abs(c5))/2, sqrt(abs(c6)) from {dbname}.sub1_bound ") tdSql.checkData(0,0,math.sqrt(2147483648.000000000)) tdSql.checkData(0,1,math.sqrt(9223372036854775807)) tdSql.checkData(0,2,math.sqrt(32767.000000000)) tdSql.checkData(0,3,math.sqrt(63.500000000)) - def support_super_table_test(self): - tdSql.execute(" use db ") - self.check_result_auto_sqrt( " select c5 from stb1 order by ts " , "select sqrt(c5) from stb1 order by ts" ) - self.check_result_auto_sqrt( " select c5 from stb1 order by tbname " , "select sqrt(c5) from stb1 order by tbname" ) - self.check_result_auto_sqrt( " select c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(c5) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_sqrt( " select c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(c5) from stb1 where c1 > 0 order by tbname" ) - - self.check_result_auto_sqrt( " select t1,c5 from stb1 order by ts " , "select sqrt(t1), sqrt(c5) from stb1 order by ts" ) - self.check_result_auto_sqrt( " select t1,c5 from stb1 order by tbname " , "select sqrt(t1) ,sqrt(c5) from stb1 order by tbname" ) - self.check_result_auto_sqrt( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(t1) ,sqrt(c5) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_sqrt( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(t1) , sqrt(c5) from stb1 where c1 > 0 order by tbname" ) + def support_super_table_test(self, dbname="db"): + self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 order by ts " , f"select sqrt(c5) from {dbname}.stb1 order by ts" ) + self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 order by tbname " , f"select sqrt(c5) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + + self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select sqrt(t1), sqrt(c5) from {dbname}.stb1 order by ts" ) + self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select sqrt(t1) ,sqrt(c5) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(t1) ,sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(t1) , sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) pass def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring diff --git a/tests/system-test/2-query/statecount.py b/tests/system-test/2-query/statecount.py index a88c4aef9fdad7580d4d10a642093c80750b1c57..c73c955de405ee54e6924c25cd219aa8b8a7f4eb 100644 --- a/tests/system-test/2-query/statecount.py +++ b/tests/system-test/2-query/statecount.py @@ -11,50 +11,47 @@ from util.sql import * from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) self.ts = 1420041600000 # 2015-01-01 00:00:00 this is begin time for first record - def prepare_datas(self): + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -70,68 +67,68 @@ class TDTestCase: ''' ) - def test_errors(self): + def test_errors(self, dbname="db"): error_sql_lists = [ - # "select statecount(c1,'GT',5) from t1" - "select statecount from t1", - "select statecount(123--123)==1 from t1", - "select statecount(123,123) from t1", - "select statecount(c1,ts) from t1", - "select statecount(c1,c1,ts) from t1", - "select statecount(c1 ,c2 ) from t1", - "select statecount(c1 ,NULL) from t1", - #"select statecount(c1 ,'NULL',1.0) from t1", - "select statecount(c1 ,'GT','1') from t1", - "select statecount(c1 ,'GT','tbname') from t1", - "select statecount(c1 ,'GT','*') from t1", - "select statecount(c1 ,'GT',ts) from t1", - "select statecount(c1 ,'GT',max(c1)) from t1", - # "select statecount(abs(c1) ,'GT',1) from t1", - # "select statecount(c1+2 ,'GT',1) from t1", - "select statecount(c1 ,'GT',1,1u) from t1", - "select statecount(c1 ,'GT',1,now) from t1", - "select statecount(c1 ,'GT','1') from t1", - "select statecount(c1 ,'GT','1',True) from t1", - "select statecount(statecount(c1) ab from t1)", - "select statecount(c1 ,'GT',1,,)int from t1", - "select statecount('c1','GT',1) from t1", - "select statecount('c1','GT' , NULL) from t1", - "select statecount('c1','GT', 1 , '') from t1", - "select statecount('c1','GT', 1 ,c%) from t1", - "select statecount(c1 ,'GT',1,t1) from t1", - "select statecount(c1 ,'GT',1,True) from t1", - "select statecount(c1 ,'GT',1) , count(c1) from t1", - "select statecount(c1 ,'GT',1) , avg(c1) from t1", - "select statecount(c1 ,'GT',1) , min(c1) from t1", - "select statecount(c1 ,'GT',1) , spread(c1) from t1", - "select statecount(c1 ,'GT',1) , diff(c1) from t1", + # f"select statecount(c1,'GT',5) from {dbname}.t1" + f"select statecount from {dbname}.t1", + f"select statecount(123--123)==1 from {dbname}.t1", + f"select statecount(123,123) from {dbname}.t1", + f"select statecount(c1,ts) from {dbname}.t1", + f"select statecount(c1,c1,ts) from {dbname}.t1", + f"select statecount(c1 ,c2 ) from {dbname}.t1", + f"select statecount(c1 ,NULL) from {dbname}.t1", + #f"select statecount(c1 ,'NULL',1.0) from {dbname}.t1", + f"select statecount(c1 ,'GT','1') from {dbname}.t1", + f"select statecount(c1 ,'GT','tbname') from {dbname}.t1", + f"select statecount(c1 ,'GT','*') from {dbname}.t1", + f"select statecount(c1 ,'GT',ts) from {dbname}.t1", + f"select statecount(c1 ,'GT',max(c1)) from {dbname}.t1", + # f"select statecount(abs(c1) ,'GT',1) from {dbname}.t1", + # f"select statecount(c1+2 ,'GT',1) from {dbname}.t1", + f"select statecount(c1 ,'GT',1,1u) from {dbname}.t1", + f"select statecount(c1 ,'GT',1,now) from {dbname}.t1", + f"select statecount(c1 ,'GT','1') from {dbname}.t1", + f"select statecount(c1 ,'GT','1',True) from {dbname}.t1", + f"select statecount(statecount(c1) ab from {dbname}.t1)", + f"select statecount(c1 ,'GT',1,,)int from {dbname}.t1", + f"select statecount('c1','GT',1) from {dbname}.t1", + f"select statecount('c1','GT' , NULL) from {dbname}.t1", + f"select statecount('c1','GT', 1 , '') from {dbname}.t1", + f"select statecount('c1','GT', 1 ,c%) from {dbname}.t1", + f"select statecount(c1 ,'GT',1,t1) from {dbname}.t1", + f"select statecount(c1 ,'GT',1,True) from {dbname}.t1", + f"select statecount(c1 ,'GT',1) , count(c1) from {dbname}.t1", + f"select statecount(c1 ,'GT',1) , avg(c1) from {dbname}.t1", + f"select statecount(c1 ,'GT',1) , min(c1) from {dbname}.t1", + f"select statecount(c1 ,'GT',1) , spread(c1) from {dbname}.t1", + f"select statecount(c1 ,'GT',1) , diff(c1) from {dbname}.t1", ] for error_sql in error_sql_lists: tdSql.error(error_sql) pass - def support_types(self): + def support_types(self, dbname="db"): other_no_value_types = [ - "select statecount(ts,'GT',1) from t1" , - "select statecount(c7,'GT',1) from t1", - "select statecount(c8,'GT',1) from t1", - "select statecount(c9,'GT',1) from t1", - "select statecount(ts,'GT',1) from ct1" , - "select statecount(c7,'GT',1) from ct1", - "select statecount(c8,'GT',1) from ct1", - "select statecount(c9,'GT',1) from ct1", - "select statecount(ts,'GT',1) from ct3" , - "select statecount(c7,'GT',1) from ct3", - "select statecount(c8,'GT',1) from ct3", - "select statecount(c9,'GT',1) from ct3", - "select statecount(ts,'GT',1) from ct4" , - "select statecount(c7,'GT',1) from ct4", - "select statecount(c8,'GT',1) from ct4", - "select statecount(c9,'GT',1) from ct4", - "select statecount(ts,'GT',1) from stb1 partition by tbname" , - "select statecount(c7,'GT',1) from stb1 partition by tbname", - "select statecount(c8,'GT',1) from stb1 partition by tbname", - "select statecount(c9,'GT',1) from stb1 partition by tbname" + f"select statecount(ts,'GT',1) from {dbname}.t1" , + f"select statecount(c7,'GT',1) from {dbname}.t1", + f"select statecount(c8,'GT',1) from {dbname}.t1", + f"select statecount(c9,'GT',1) from {dbname}.t1", + f"select statecount(ts,'GT',1) from {dbname}.ct1" , + f"select statecount(c7,'GT',1) from {dbname}.ct1", + f"select statecount(c8,'GT',1) from {dbname}.ct1", + f"select statecount(c9,'GT',1) from {dbname}.ct1", + f"select statecount(ts,'GT',1) from {dbname}.ct3" , + f"select statecount(c7,'GT',1) from {dbname}.ct3", + f"select statecount(c8,'GT',1) from {dbname}.ct3", + f"select statecount(c9,'GT',1) from {dbname}.ct3", + f"select statecount(ts,'GT',1) from {dbname}.ct4" , + f"select statecount(c7,'GT',1) from {dbname}.ct4", + f"select statecount(c8,'GT',1) from {dbname}.ct4", + f"select statecount(c9,'GT',1) from {dbname}.ct4", + f"select statecount(ts,'GT',1) from {dbname}.stb1 partition by tbname" , + f"select statecount(c7,'GT',1) from {dbname}.stb1 partition by tbname", + f"select statecount(c8,'GT',1) from {dbname}.stb1 partition by tbname", + f"select statecount(c9,'GT',1) from {dbname}.stb1 partition by tbname" ] for type_sql in other_no_value_types: @@ -139,224 +136,222 @@ class TDTestCase: tdLog.info("support type ok , sql is : %s"%type_sql) type_sql_lists = [ - "select statecount(c1,'GT',1) from t1", - "select statecount(c2,'GT',1) from t1", - "select statecount(c3,'GT',1) from t1", - "select statecount(c4,'GT',1) from t1", - "select statecount(c5,'GT',1) from t1", - "select statecount(c6,'GT',1) from t1", - - "select statecount(c1,'GT',1) from ct1", - "select statecount(c2,'GT',1) from ct1", - "select statecount(c3,'GT',1) from ct1", - "select statecount(c4,'GT',1) from ct1", - "select statecount(c5,'GT',1) from ct1", - "select statecount(c6,'GT',1) from ct1", - - "select statecount(c1,'GT',1) from ct3", - "select statecount(c2,'GT',1) from ct3", - "select statecount(c3,'GT',1) from ct3", - "select statecount(c4,'GT',1) from ct3", - "select statecount(c5,'GT',1) from ct3", - "select statecount(c6,'GT',1) from ct3", - - "select statecount(c1,'GT',1) from stb1 partition by tbname", - "select statecount(c2,'GT',1) from stb1 partition by tbname", - "select statecount(c3,'GT',1) from stb1 partition by tbname", - "select statecount(c4,'GT',1) from stb1 partition by tbname", - "select statecount(c5,'GT',1) from stb1 partition by tbname", - "select statecount(c6,'GT',1) from stb1 partition by tbname", - - "select statecount(c6,'GT',1) as alisb from stb1 partition by tbname", - "select statecount(c6,'GT',1) alisb from stb1 partition by tbname", + f"select statecount(c1,'GT',1) from {dbname}.t1", + f"select statecount(c2,'GT',1) from {dbname}.t1", + f"select statecount(c3,'GT',1) from {dbname}.t1", + f"select statecount(c4,'GT',1) from {dbname}.t1", + f"select statecount(c5,'GT',1) from {dbname}.t1", + f"select statecount(c6,'GT',1) from {dbname}.t1", + + f"select statecount(c1,'GT',1) from {dbname}.ct1", + f"select statecount(c2,'GT',1) from {dbname}.ct1", + f"select statecount(c3,'GT',1) from {dbname}.ct1", + f"select statecount(c4,'GT',1) from {dbname}.ct1", + f"select statecount(c5,'GT',1) from {dbname}.ct1", + f"select statecount(c6,'GT',1) from {dbname}.ct1", + + f"select statecount(c1,'GT',1) from {dbname}.ct3", + f"select statecount(c2,'GT',1) from {dbname}.ct3", + f"select statecount(c3,'GT',1) from {dbname}.ct3", + f"select statecount(c4,'GT',1) from {dbname}.ct3", + f"select statecount(c5,'GT',1) from {dbname}.ct3", + f"select statecount(c6,'GT',1) from {dbname}.ct3", + + f"select statecount(c1,'GT',1) from {dbname}.stb1 partition by tbname", + f"select statecount(c2,'GT',1) from {dbname}.stb1 partition by tbname", + f"select statecount(c3,'GT',1) from {dbname}.stb1 partition by tbname", + f"select statecount(c4,'GT',1) from {dbname}.stb1 partition by tbname", + f"select statecount(c5,'GT',1) from {dbname}.stb1 partition by tbname", + f"select statecount(c6,'GT',1) from {dbname}.stb1 partition by tbname", + + f"select statecount(c6,'GT',1) as alisb from {dbname}.stb1 partition by tbname", + f"select statecount(c6,'GT',1) alisb from {dbname}.stb1 partition by tbname", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - def support_opers(self): + def support_opers(self, dbname="db"): oper_lists = ['LT','lt','Lt','lT','GT','gt','Gt','gT','LE','le','Le','lE','GE','ge','Ge','gE','NE','ne','Ne','nE','EQ','eq','Eq','eQ'] oper_errors = [",","*","NULL","tbname","ts","sum","_c0"] for oper in oper_lists: - tdSql.query(f"select statecount(c1 ,'{oper}',1) as col from t1") + tdSql.query(f"select statecount(c1 ,'{oper}',1) as col from {dbname}.t1") tdSql.checkRows(12) for oper in oper_errors: - tdSql.error(f"select statecount(c1 ,'{oper}',1) as col from t1") - + tdSql.error(f"select statecount(c1 ,'{oper}',1) as col from {dbname}.t1") - def basic_statecount_function(self): + def basic_statecount_function(self, dbname="db"): # basic query - tdSql.query("select c1 from ct3") + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3") # will support _rowts mix with - # tdSql.query("select (c6,'GT',1),_rowts from ct3") + # tdSql.query(f"select (c6,'GT',1),_rowts from {dbname}.ct3") # auto check for t1 table # used for regular table - tdSql.query("select statecount(c6,'GT',1) from t1") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.t1") # unique with super tags - tdSql.query("select statecount(c6,'GT',1) from ct1") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select statecount(c6,'GT',1) from ct4") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct4") tdSql.checkRows(12) - tdSql.query("select statecount(c6,'GT',1),tbname from ct1") + tdSql.query(f"select statecount(c6,'GT',1),tbname from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select statecount(c6,'GT',1),t1 from ct1") + tdSql.query(f"select statecount(c6,'GT',1),t1 from {dbname}.ct1") tdSql.checkRows(13) # unique with common col - tdSql.query("select statecount(c6,'GT',1) ,ts from ct1") + tdSql.query(f"select statecount(c6,'GT',1) ,ts from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select ts, statecount(c6,'GT',1) from ct1") + tdSql.query(f"select ts, statecount(c6,'GT',1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select statecount(c6,'GT',1) ,c1 from ct1") + tdSql.query(f"select statecount(c6,'GT',1) ,c1 from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select c1, statecount(c6,'GT',1) from ct1") + tdSql.query(f"select c1, statecount(c6,'GT',1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select ts, c1, c2, c3, statecount(c6,'GT',1) from ct1") + tdSql.query(f"select ts, c1, c2, c3, statecount(c6,'GT',1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select statecount(c6,'GT',1), ts, c1, c2, c3 from ct1") + tdSql.query(f"select statecount(c6,'GT',1), ts, c1, c2, c3 from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select ts, c1, c2, c3, statecount(c6,'GT',1), ts, c4, c5, c6 from ct1") + tdSql.query(f"select ts, c1, c2, c3, statecount(c6,'GT',1), ts, c4, c5, c6 from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select stateduration(c6,'GT',1) ,ts from ct1") + tdSql.query(f"select stateduration(c6,'GT',1) ,ts from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select ts, stateduration(c6,'GT',1) from ct1") + tdSql.query(f"select ts, stateduration(c6,'GT',1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select stateduration(c6,'GT',1) ,c1 from ct1") + tdSql.query(f"select stateduration(c6,'GT',1) ,c1 from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select c1, stateduration(c6,'GT',1) from ct1") + tdSql.query(f"select c1, stateduration(c6,'GT',1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select ts, c1, c2, c3, stateduration(c6,'GT',1) from ct1") + tdSql.query(f"select ts, c1, c2, c3, stateduration(c6,'GT',1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select stateduration(c6,'GT',1), ts, c1, c2, c3 from ct1") + tdSql.query(f"select stateduration(c6,'GT',1), ts, c1, c2, c3 from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select ts, c1, c2, c3, stateduration(c6,'GT',1), ts, c4, c5, c6 from ct1") + tdSql.query(f"select ts, c1, c2, c3, stateduration(c6,'GT',1), ts, c4, c5, c6 from {dbname}.ct1") tdSql.checkRows(13) # unique with scalar function - tdSql.query("select statecount(c6,'GT',1) , abs(c1) from ct1") + tdSql.query(f"select statecount(c6,'GT',1) , abs(c1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select statecount(c6,'GT',1) , abs(c2)+2 from ct1") + tdSql.query(f"select statecount(c6,'GT',1) , abs(c2)+2 from {dbname}.ct1") tdSql.checkRows(13) - tdSql.error("select statecount(c6,'GT',1) , unique(c2) from ct1") + tdSql.error(f"select statecount(c6,'GT',1) , unique(c2) from {dbname}.ct1") - tdSql.query("select stateduration(c6,'GT',1) , abs(c1) from ct1") + tdSql.query(f"select stateduration(c6,'GT',1) , abs(c1) from {dbname}.ct1") tdSql.checkRows(13) - tdSql.query("select stateduration(c6,'GT',1) , abs(c2)+2 from ct1") + tdSql.query(f"select stateduration(c6,'GT',1) , abs(c2)+2 from {dbname}.ct1") tdSql.checkRows(13) - tdSql.error("select stateduration(c6,'GT',1) , unique(c2) from ct1") + tdSql.error(f"select stateduration(c6,'GT',1) , unique(c2) from {dbname}.ct1") # unique with aggregate function - tdSql.error("select statecount(c6,'GT',1) ,sum(c1) from ct1") - tdSql.error("select statecount(c6,'GT',1) ,max(c1) from ct1") - tdSql.error("select statecount(c6,'GT',1) ,csum(c1) from ct1") - tdSql.error("select statecount(c6,'GT',1) ,count(c1) from ct1") + tdSql.error(f"select statecount(c6,'GT',1) ,sum(c1) from {dbname}.ct1") + tdSql.error(f"select statecount(c6,'GT',1) ,max(c1) from {dbname}.ct1") + tdSql.error(f"select statecount(c6,'GT',1) ,csum(c1) from {dbname}.ct1") + tdSql.error(f"select statecount(c6,'GT',1) ,count(c1) from {dbname}.ct1") # unique with filter where - tdSql.query("select statecount(c6,'GT',1) from ct4 where c1 is null") + tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct4 where c1 is null") tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, None) tdSql.checkData(2, 0, None) - tdSql.query("select statecount(c1,'GT',1) from t1 where c1 >2 ") + tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.t1 where c1 >2 ") tdSql.checkData(0, 0, 1) tdSql.checkData(1, 0, 2) tdSql.checkData(2, 0, 3) tdSql.checkData(4, 0, 5) tdSql.checkData(5, 0, 6) - tdSql.query("select statecount(c2,'GT',1) from t1 where c2 between 0 and 99999") + tdSql.query(f"select statecount(c2,'GT',1) from {dbname}.t1 where c2 between 0 and 99999") tdSql.checkData(0, 0, 1) tdSql.checkData(1, 0, 2) tdSql.checkData(6, 0, -1) # unique with union all - tdSql.query("select statecount(c1,'GT',1) from ct4 union all select statecount(c1,'GT',1) from ct1") + tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.ct4 union all select statecount(c1,'GT',1) from {dbname}.ct1") tdSql.checkRows(25) - tdSql.query("select statecount(c1,'GT',1) from ct4 union all select distinct(c1) from ct4") + tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.ct4 union all select distinct(c1) from {dbname}.ct4") tdSql.checkRows(22) # unique with join # prepare join datas with same ts - tdSql.execute(" use db ") - tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)") - tdSql.execute(" create table tb1 using st1 tags(1)") - tdSql.execute(" create table tb2 using st1 tags(2)") + tdSql.execute(f"create stable {dbname}.st1 (ts timestamp , num int) tags(ind int)") + tdSql.execute(f"create table {dbname}.tb1 using {dbname}.st1 tags(1)") + tdSql.execute(f"create table {dbname}.tb2 using {dbname}.st1 tags(2)") - tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)") - tdSql.execute(" create table ttb1 using st2 tags(1)") - tdSql.execute(" create table ttb2 using st2 tags(2)") + tdSql.execute(f"create stable {dbname}.st2 (ts timestamp , num int) tags(ind int)") + tdSql.execute(f"create table {dbname}.ttb1 using {dbname}.st2 tags(1)") + tdSql.execute(f"create table {dbname}.ttb2 using {dbname}.st2 tags(2)") start_ts = 1622369635000 # 2021-05-30 18:13:55 for i in range(10): ts_value = start_ts+i*1000 - tdSql.execute(f" insert into tb1 values({ts_value} , {i})") - tdSql.execute(f" insert into tb2 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.tb1 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.tb2 values({ts_value} , {i})") - tdSql.execute(f" insert into ttb1 values({ts_value} , {i})") - tdSql.execute(f" insert into ttb2 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.ttb1 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.ttb2 values({ts_value} , {i})") - tdSql.query("select statecount(tb1.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts ") + tdSql.query(f"select statecount(tb1.num,'GT',1) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts ") tdSql.checkRows(10) tdSql.checkData(0,0,-1) tdSql.checkData(1,0,-1) tdSql.checkData(2,0,1) tdSql.checkData(9,0,8) - tdSql.query("select statecount(tb1.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts union all select statecount(tb2.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts ") + tdSql.query(f"select statecount(tb1.num,'GT',1) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts union all select statecount(tb2.num,'GT',1) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts ") tdSql.checkRows(20) # nest query - # tdSql.query("select unique(c1) from (select c1 from ct1)") - tdSql.query("select c1 from (select statecount(c1,'GT',1) c1 from t1)") + # tdSql.query(f"select unique(c1) from (select c1 from {dbname}.ct1)") + tdSql.query(f"select c1 from (select statecount(c1,'GT',1) c1 from {dbname}.t1)") tdSql.checkRows(12) tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, -1) tdSql.checkData(2, 0, 1) tdSql.checkData(10, 0, 8) - tdSql.query("select sum(c1) from (select statecount(c1,'GT',1) c1 from t1)") + tdSql.query(f"select sum(c1) from (select statecount(c1,'GT',1) c1 from {dbname}.t1)") tdSql.checkRows(1) tdSql.checkData(0, 0, 35) - tdSql.query("select sum(c1) from (select distinct(c1) c1 from ct1) union all select sum(c1) from (select statecount(c1,'GT',1) c1 from ct1)") + tdSql.query(f"select sum(c1) from (select distinct(c1) c1 from {dbname}.ct1) union all select sum(c1) from (select statecount(c1,'GT',1) c1 from {dbname}.ct1)") tdSql.checkRows(2) - tdSql.query("select 1-abs(c1) from (select statecount(c1,'GT',1) c1 from t1)") + tdSql.query(f"select 1-abs(c1) from (select statecount(c1,'GT',1) c1 from {dbname}.t1)") tdSql.checkRows(12) tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, 0.000000000) @@ -365,43 +360,41 @@ class TDTestCase: # bug for stable #partition by tbname - # tdSql.query(" select unique(c1) from stb1 partition by tbname ") + # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ") # tdSql.checkRows(21) - # tdSql.query(" select unique(c1) from stb1 partition by tbname ") + # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ") # tdSql.checkRows(21) # group by - tdSql.error("select statecount(c1,'GT',1) from ct1 group by c1") - tdSql.error("select statecount(c1,'GT',1) from ct1 group by tbname") - - # super table - - def check_unit_time(self): - tdSql.execute(" use db ") - tdSql.error("select stateduration(c1,'GT',1,1b) from ct1") - tdSql.error("select stateduration(c1,'GT',1,1u) from ct1") - tdSql.error("select stateduration(c1,'GT',1,1000s) from t1") - tdSql.error("select stateduration(c1,'GT',1,10m) from t1") - tdSql.error("select stateduration(c1,'GT',1,10d) from t1") - tdSql.query("select stateduration(c1,'GT',1,1s) from t1") + tdSql.error(f"select statecount(c1,'GT',1) from {dbname}.ct1 group by c1") + tdSql.error(f"select statecount(c1,'GT',1) from {dbname}.ct1 group by tbname") + + def check_unit_time(self, dbname="db"): + tdSql.error(f"select stateduration(c1,'GT',1,1b) from {dbname}.ct1") + tdSql.error(f"select stateduration(c1,'GT',1,1u) from {dbname}.ct1") + tdSql.error(f"select stateduration(c1,'GT',1,1000s) from {dbname}.t1") + tdSql.error(f"select stateduration(c1,'GT',1,10m) from {dbname}.t1") + tdSql.error(f"select stateduration(c1,'GT',1,10d) from {dbname}.t1") + tdSql.query(f"select stateduration(c1,'GT',1,1s) from {dbname}.t1") tdSql.checkData(10,0,63072035) - tdSql.query("select stateduration(c1,'GT',1,1m) from t1") + tdSql.query(f"select stateduration(c1,'GT',1,1m) from {dbname}.t1") tdSql.checkData(10,0,int(63072035/60)) - tdSql.query("select stateduration(c1,'GT',1,1h) from t1") + tdSql.query(f"select stateduration(c1,'GT',1,1h) from {dbname}.t1") tdSql.checkData(10,0,int(63072035/60/60)) - tdSql.query("select stateduration(c1,'GT',1,1d) from t1") + tdSql.query(f"select stateduration(c1,'GT',1,1d) from {dbname}.t1") tdSql.checkData(10,0,int(63072035/60/24/60)) - tdSql.query("select stateduration(c1,'GT',1,1w) from t1") + tdSql.query(f"select stateduration(c1,'GT',1,1w) from {dbname}.t1") tdSql.checkData(10,0,int(63072035/60/7/24/60)) def query_precision(self): def generate_data(precision="ms"): - tdSql.execute("create database if not exists db_%s precision '%s';" %(precision, precision)) + dbname = f"db_{precision}" + tdSql.execute(f"create database if not exists db_%s precision '%s';" %(precision, precision)) tdSql.execute("use db_%s;" %precision) - tdSql.execute("create stable db_%s.st (ts timestamp , id int) tags(ind int);"%precision) - tdSql.execute("create table db_%s.tb1 using st tags(1);"%precision) - tdSql.execute("create table db_%s.tb2 using st tags(2);"%precision) + tdSql.execute(f"create stable db_%s.st (ts timestamp , id int) tags(ind int);"%precision) + tdSql.execute(f"create table db_%s.tb1 using {dbname}.st tags(1);"%precision) + tdSql.execute(f"create table db_%s.tb2 using {dbname}.st tags(2);"%precision) if precision == "ms": start_ts = self.ts @@ -432,55 +425,54 @@ class TDTestCase: if pres == "ms": if unit in ["1u","1b"]: - tdSql.error("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) + tdSql.error(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) pass else: - tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) + tdSql.query(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) elif pres == "us" and unit in ["1b"]: if unit in ["1b"]: - tdSql.error("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) + tdSql.error(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) pass else: - tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) + tdSql.query(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) else: - tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) + tdSql.query(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres)) basic_result = 70 tdSql.checkData(9,0,basic_result*pow(1000,index)) - def check_boundary_values(self): + def check_boundary_values(self, dbname="bound_test"): - tdSql.execute("drop database if exists bound_test") - tdSql.execute("create database if not exists bound_test") - tdSql.execute("use bound_test") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname}") tdSql.execute( - "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" ) - tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( - f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.error( - f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - tdSql.query("select statecount(c1,'GT',1) from sub1_bound") + tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.sub1_bound") tdSql.checkRows(5) def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring diff --git a/tests/system-test/2-query/substr.py b/tests/system-test/2-query/substr.py index f833a42b574aac2cf8cfcab1bae7035b8273c427..ea55c5e44e780c5acdd86b8be29e8654b8d1251e 100644 --- a/tests/system-test/2-query/substr.py +++ b/tests/system-test/2-query/substr.py @@ -127,16 +127,16 @@ class TDTestCase: return sqls - def __test_current(self): # sourcery skip: use-itertools-product + def __test_current(self, dbname="db"): # sourcery skip: use-itertools-product tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: self.__substr_check(tb, CURRENT_POS, LENS) tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") - def __test_error(self): + def __test_error(self, dbname="db"): tdLog.printNoPrefix("==========err sql condition check , must return error==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: for errsql in self.__substr_err_check(tb): @@ -145,22 +145,21 @@ class TDTestCase: tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") - def all_test(self): - self.__test_current() - self.__test_error() + def all_test(self, dbname="db"): + self.__test_current(dbname) + self.__test_error(dbname) - def __create_tb(self): - tdSql.prepare() + def __create_tb(self, dbname="db"): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp ) tags (tag1 int) ''' - create_ntb_sql = f'''create table t1( + create_ntb_sql = f'''create table {dbname}.t1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -170,29 +169,29 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') - def __insert_data(self, rows): + def __insert_data(self, rows, dbname="db"): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into ct1 values + f'''insert into {dbname}.ct1 values ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -208,7 +207,7 @@ class TDTestCase: ) tdSql.execute( - f'''insert into ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -224,13 +223,13 @@ class TDTestCase: ) for i in range(rows): - insert_data = f'''insert into t1 values + insert_data = f'''insert into {dbname}.t1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -259,10 +258,7 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) - - tdSql.execute("use db") + tdSql.execute("flush database db") tdLog.printNoPrefix("==========step4:after wal, all check again ") self.all_test() diff --git a/tests/system-test/2-query/sum.py b/tests/system-test/2-query/sum.py index 4f5ed34419082d49990f14b6e8518b516c4e7df8..dbc79e25f5ba230723f54507f47da91514698c69 100644 --- a/tests/system-test/2-query/sum.py +++ b/tests/system-test/2-query/sum.py @@ -89,14 +89,14 @@ class TDTestCase: return sqls - def __test_current(self): + def __test_current(self, dbname="db"): tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") tbname = ["ct1", "ct2", "ct4", "t1"] for tb in tbname: self.__sum_current_check(tb) tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") - def __test_error(self): + def __test_error(self, dbname="db"): tdLog.printNoPrefix("==========err sql condition check , must return error==========") tbname = ["ct1", "ct2", "ct4", "t1"] @@ -106,21 +106,21 @@ class TDTestCase: tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") - def all_test(self): - self.__test_current() - self.__test_error() + def all_test(self, dbname="db"): + self.__test_current(dbname) + self.__test_error(dbname) - def __create_tb(self): + def __create_tb(self, dbname="db"): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table {DBNAME}.stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp - ) tags (t1 int) + ) tags (tag1 int) ''' - create_ntb_sql = f'''create table {DBNAME}.t1( + create_ntb_sql = f'''create table {dbname}.t1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -130,83 +130,82 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table {DBNAME}.ct{i+1} using {DBNAME}.stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') - def __insert_data(self, rows): + def __insert_data(self, rows, dbname="db"): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into {DBNAME}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into {DBNAME}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into {DBNAME}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into {DBNAME}.ct1 values - ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } ) - ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } ) + f'''insert into {dbname}.ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into {DBNAME}.ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) - ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, - { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000} + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} ) ( { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, - { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000} + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} ) ''' ) tdSql.execute( - f'''insert into {DBNAME}.ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) - ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, - { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000 } + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } ) ( { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, - { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000 } + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } ) ''' ) for i in range(rows): - insert_data = f'''insert into {DBNAME}.t1 values + insert_data = f'''insert into {dbname}.t1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, - "binary_{i}", "nchar_{i}", { now_time - 1000 * i } ) + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into {DBNAME}.t1 values + f'''insert into {dbname}.t1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, - "binary_limit-1", "nchar_limit-1", { now_time - 86400000 } + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } ) ( { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, - "binary_limit-2", "nchar_limit-2", { now_time - 172800000 } + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } ) ''' ) - def run(self): tdSql.prepare() @@ -219,12 +218,8 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - # tdDnodes.stop(1) - # tdDnodes.start(1) - tdSql.execute("flush database db") - tdSql.execute("use db") tdLog.printNoPrefix("==========step4:after wal, all check again ") diff --git a/tests/system-test/2-query/tail.py b/tests/system-test/2-query/tail.py index d708873d6ff608581a64120a054c81f0b3a8da1f..687023f57ec833248c2c7c472b751019a90f930f 100644 --- a/tests/system-test/2-query/tail.py +++ b/tests/system-test/2-query/tail.py @@ -10,49 +10,46 @@ from util.sql import * from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - - def prepare_datas(self): + + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) - + tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -67,115 +64,115 @@ class TDTestCase: ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ''' ) - - def test_errors(self): + + def test_errors(self, dbname="db"): error_sql_lists = [ - "select tail from t1", - "select tail(123--123)==1 from t1", - "select tail(123,123) from t1", - "select tail(c1,ts) from t1", - "select tail(c1,c1,ts) from t1", - "select tail(c1) as 'd1' from t1", - "select tail(c1 ,c2 ) from t1", - "select tail(c1 ,NULL) from t1", - "select tail(,) from t1;", - "select tail(tail(c1) ab from t1)", - "select tail(c1) as int from t1", - "select tail('c1') from t1", - "select tail(NULL) from t1", - "select tail('') from t1", - "select tail(c%) from t1", - "select tail(t1) from t1", - "select tail(True) from t1", - "select tail(c1,1) , count(c1) from t1", - "select tail(c1,1) , avg(c1) from t1", - "select tail(c1,1) , min(c1) from t1", - "select tail(c1,1) , spread(c1) from t1", - "select tail(c1,1) , diff(c1) from t1", - "select tail from stb1 partition by tbname", - "select tail(123--123)==1 from stb1 partition by tbname", - "select tail(123,123) from stb1 partition by tbname", - "select tail(c1,ts) from stb1 partition by tbname", - "select tail(c1,c1,ts) from stb1 partition by tbname", - "select tail(c1) as 'd1' from stb1 partition by tbname", - "select tail(c1 ,c2 ) from stb1 partition by tbname", - "select tail(c1 ,NULL) from stb1 partition by tbname", - "select tail(,) from stb1 partition by tbname;", - "select tail(tail(c1) ab from stb1 partition by tbname)", - "select tail(c1) as int from stb1 partition by tbname", - "select tail('c1') from stb1 partition by tbname", - "select tail(NULL) from stb1 partition by tbname", - "select tail('') from stb1 partition by tbname", - "select tail(c%) from stb1 partition by tbname", - "select tail(t1) from stb1 partition by tbname", - "select tail(True) from stb1 partition by tbname", - "select tail(c1,1) , count(c1) from stb1 partition by tbname", - "select tail(c1,1) , avg(c1) from stb1 partition by tbname", - "select tail(c1,1) , min(c1) from stb1 partition by tbname", - "select tail(c1,1) , spread(c1) from stb1 partition by tbname", - "select tail(c1,1) , diff(c1) from stb1 partition by tbname", + f"select tail from {dbname}.t1", + f"select tail(123--123)==1 from {dbname}.t1", + f"select tail(123,123) from {dbname}.t1", + f"select tail(c1,ts) from {dbname}.t1", + f"select tail(c1,c1,ts) from {dbname}.t1", + f"select tail(c1) as 'd1' from {dbname}.t1", + f"select tail(c1 ,c2 ) from {dbname}.t1", + f"select tail(c1 ,NULL) from {dbname}.t1", + f"select tail(,) from {dbname}.t1;", + f"select tail(tail(c1) ab from {dbname}.t1)", + f"select tail(c1) as int from {dbname}.t1", + f"select tail('c1') from {dbname}.t1", + f"select tail(NULL) from {dbname}.t1", + f"select tail('') from {dbname}.t1", + f"select tail(c%) from {dbname}.t1", + f"select tail(t1) from {dbname}.t1", + f"select tail(True) from {dbname}.t1", + f"select tail(c1,1) , count(c1) from {dbname}.t1", + f"select tail(c1,1) , avg(c1) from {dbname}.t1", + f"select tail(c1,1) , min(c1) from {dbname}.t1", + f"select tail(c1,1) , spread(c1) from {dbname}.t1", + f"select tail(c1,1) , diff(c1) from {dbname}.t1", + f"select tail from {dbname}.stb1 partition by tbname", + f"select tail(123--123)==1 from {dbname}.stb1 partition by tbname", + f"select tail(123,123) from {dbname}.stb1 partition by tbname", + f"select tail(c1,ts) from {dbname}.stb1 partition by tbname", + f"select tail(c1,c1,ts) from {dbname}.stb1 partition by tbname", + f"select tail(c1) as 'd1' from {dbname}.stb1 partition by tbname", + f"select tail(c1 ,c2 ) from {dbname}.stb1 partition by tbname", + f"select tail(c1 ,NULL) from {dbname}.stb1 partition by tbname", + f"select tail(,) from {dbname}.stb1 partition by tbname;", + f"select tail(tail(c1) ab from {dbname}.stb1 partition by tbname)", + f"select tail(c1) as int from {dbname}.stb1 partition by tbname", + f"select tail('c1') from {dbname}.stb1 partition by tbname", + f"select tail(NULL) from {dbname}.stb1 partition by tbname", + f"select tail('') from {dbname}.stb1 partition by tbname", + f"select tail(c%) from {dbname}.stb1 partition by tbname", + f"select tail(t1) from {dbname}.stb1 partition by tbname", + f"select tail(True) from {dbname}.stb1 partition by tbname", + f"select tail(c1,1) , count(c1) from {dbname}.stb1 partition by tbname", + f"select tail(c1,1) , avg(c1) from {dbname}.stb1 partition by tbname", + f"select tail(c1,1) , min(c1) from {dbname}.stb1 partition by tbname", + f"select tail(c1,1) , spread(c1) from {dbname}.stb1 partition by tbname", + f"select tail(c1,1) , diff(c1) from {dbname}.stb1 partition by tbname", ] for error_sql in error_sql_lists: tdSql.error(error_sql) - - def support_types(self): + + def support_types(self, dbname="db"): other_no_value_types = [ - "select tail(ts,1) from t1" , - "select tail(c7,1) from t1", - "select tail(c8,1) from t1", - "select tail(c9,1) from t1", - "select tail(ts,1) from ct1" , - "select tail(c7,1) from ct1", - "select tail(c8,1) from ct1", - "select tail(c9,1) from ct1", - "select tail(ts,1) from ct3" , - "select tail(c7,1) from ct3", - "select tail(c8,1) from ct3", - "select tail(c9,1) from ct3", - "select tail(ts,1) from ct4" , - "select tail(c7,1) from ct4", - "select tail(c8,1) from ct4", - "select tail(c9,1) from ct4", - "select tail(ts,1) from stb1 partition by tbname" , - "select tail(c7,1) from stb1 partition by tbname", - "select tail(c8,1) from stb1 partition by tbname", - "select tail(c9,1) from stb1 partition by tbname" + f"select tail(ts,1) from {dbname}.t1" , + f"select tail(c7,1) from {dbname}.t1", + f"select tail(c8,1) from {dbname}.t1", + f"select tail(c9,1) from {dbname}.t1", + f"select tail(ts,1) from {dbname}.ct1" , + f"select tail(c7,1) from {dbname}.ct1", + f"select tail(c8,1) from {dbname}.ct1", + f"select tail(c9,1) from {dbname}.ct1", + f"select tail(ts,1) from {dbname}.ct3" , + f"select tail(c7,1) from {dbname}.ct3", + f"select tail(c8,1) from {dbname}.ct3", + f"select tail(c9,1) from {dbname}.ct3", + f"select tail(ts,1) from {dbname}.ct4" , + f"select tail(c7,1) from {dbname}.ct4", + f"select tail(c8,1) from {dbname}.ct4", + f"select tail(c9,1) from {dbname}.ct4", + f"select tail(ts,1) from {dbname}.stb1 partition by tbname" , + f"select tail(c7,1) from {dbname}.stb1 partition by tbname", + f"select tail(c8,1) from {dbname}.stb1 partition by tbname", + f"select tail(c9,1) from {dbname}.stb1 partition by tbname" ] - + for type_sql in other_no_value_types: tdSql.query(type_sql) - + type_sql_lists = [ - "select tail(c1,1) from t1", - "select tail(c2,1) from t1", - "select tail(c3,1) from t1", - "select tail(c4,1) from t1", - "select tail(c5,1) from t1", - "select tail(c6,1) from t1", - - "select tail(c1,1) from ct1", - "select tail(c2,1) from ct1", - "select tail(c3,1) from ct1", - "select tail(c4,1) from ct1", - "select tail(c5,1) from ct1", - "select tail(c6,1) from ct1", - - "select tail(c1,1) from ct3", - "select tail(c2,1) from ct3", - "select tail(c3,1) from ct3", - "select tail(c4,1) from ct3", - "select tail(c5,1) from ct3", - "select tail(c6,1) from ct3", - - "select tail(c1,1) from stb1 partition by tbname", - "select tail(c2,1) from stb1 partition by tbname", - "select tail(c3,1) from stb1 partition by tbname", - "select tail(c4,1) from stb1 partition by tbname", - "select tail(c5,1) from stb1 partition by tbname", - "select tail(c6,1) from stb1 partition by tbname", - - "select tail(c6,1) as alisb from stb1 partition by tbname", - "select tail(c6,1) alisb from stb1 partition by tbname", + f"select tail(c1,1) from {dbname}.t1", + f"select tail(c2,1) from {dbname}.t1", + f"select tail(c3,1) from {dbname}.t1", + f"select tail(c4,1) from {dbname}.t1", + f"select tail(c5,1) from {dbname}.t1", + f"select tail(c6,1) from {dbname}.t1", + + f"select tail(c1,1) from {dbname}.ct1", + f"select tail(c2,1) from {dbname}.ct1", + f"select tail(c3,1) from {dbname}.ct1", + f"select tail(c4,1) from {dbname}.ct1", + f"select tail(c5,1) from {dbname}.ct1", + f"select tail(c6,1) from {dbname}.ct1", + + f"select tail(c1,1) from {dbname}.ct3", + f"select tail(c2,1) from {dbname}.ct3", + f"select tail(c3,1) from {dbname}.ct3", + f"select tail(c4,1) from {dbname}.ct3", + f"select tail(c5,1) from {dbname}.ct3", + f"select tail(c6,1) from {dbname}.ct3", + + f"select tail(c1,1) from {dbname}.stb1 partition by tbname", + f"select tail(c2,1) from {dbname}.stb1 partition by tbname", + f"select tail(c3,1) from {dbname}.stb1 partition by tbname", + f"select tail(c4,1) from {dbname}.stb1 partition by tbname", + f"select tail(c5,1) from {dbname}.stb1 partition by tbname", + f"select tail(c6,1) from {dbname}.stb1 partition by tbname", + + f"select tail(c6,1) as alisb from {dbname}.stb1 partition by tbname", + f"select tail(c6,1) alisb from {dbname}.stb1 partition by tbname", ] for type_sql in type_sql_lists: @@ -189,7 +186,6 @@ class TDTestCase: tail_result = tdSql.queryResult tdSql.query(equal_sql) - print(equal_sql) equal_result = tdSql.queryResult @@ -198,257 +194,255 @@ class TDTestCase: else: tdLog.exit(" tail query check fail , tail sql is: %s " %tail_sql) - def basic_tail_function(self): + def basic_tail_function(self, dbname="db"): - # basic query - tdSql.query("select c1 from ct3") + # basic query + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select tail(c1,1) from ct3") + tdSql.query(f"select tail(c1,1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tail(c2,1) from ct3") + tdSql.query(f"select tail(c2,1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tail(c3,1) from ct3") + tdSql.query(f"select tail(c3,1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tail(c4,1) from ct3") + tdSql.query(f"select tail(c4,1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tail(c5,1) from ct3") + tdSql.query(f"select tail(c5,1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tail(c6,1) from ct3") - + tdSql.query(f"select tail(c6,1) from {dbname}.ct3") + # auto check for t1 table # used for regular table - tdSql.query("select tail(c1,1) from t1") - - tdSql.query("desc t1") + tdSql.query(f"select tail(c1,1) from {dbname}.t1") + + tdSql.query(f"desc {dbname}.t1") col_lists_rows = tdSql.queryResult col_lists = [] for col_name in col_lists_rows: if col_name[0] =="ts": continue - + col_lists.append(col_name[0]) - + for col in col_lists: - for loop in range(100): + for loop in range(100): limit = randint(1,100) offset = randint(0,100) - self.check_tail_table("t1" , col , limit , offset) + self.check_tail_table(f"{dbname}.t1" , col , limit , offset) # tail for invalid params - - tdSql.error("select tail(c1,-10,10) from ct1") - tdSql.error("select tail(c1,10,10000) from ct1") - tdSql.error("select tail(c1,10,-100) from ct1") - tdSql.error("select tail(c1,100/2,10) from ct1") - tdSql.error("select tail(c1,5,10*2) from ct1") - tdSql.query("select tail(c1,100,100) from ct1") + + tdSql.error(f"select tail(c1,-10,10) from {dbname}.ct1") + tdSql.error(f"select tail(c1,10,10000) from {dbname}.ct1") + tdSql.error(f"select tail(c1,10,-100) from {dbname}.ct1") + tdSql.error(f"select tail(c1,100/2,10) from {dbname}.ct1") + tdSql.error(f"select tail(c1,5,10*2) from {dbname}.ct1") + tdSql.query(f"select tail(c1,100,100) from {dbname}.ct1") tdSql.checkRows(0) - tdSql.query("select tail(c1,10,100) from ct1") + tdSql.query(f"select tail(c1,10,100) from {dbname}.ct1") tdSql.checkRows(0) - tdSql.error("select tail(c1,10,101) from ct1") - tdSql.query("select tail(c1,10,0) from ct1") - tdSql.query("select tail(c1,100,10) from ct1") + tdSql.error(f"select tail(c1,10,101) from {dbname}.ct1") + tdSql.query(f"select tail(c1,10,0) from {dbname}.ct1") + tdSql.query(f"select tail(c1,100,10) from {dbname}.ct1") tdSql.checkRows(3) - + # tail with super tags - tdSql.query("select tail(c1,10,10) from ct1") + tdSql.query(f"select tail(c1,10,10) from {dbname}.ct1") tdSql.checkRows(3) - tdSql.query("select tail(c1,10,10),tbname from ct1") - tdSql.query("select tail(c1,10,10),t1 from ct1") + tdSql.query(f"select tail(c1,10,10),tbname from {dbname}.ct1") + tdSql.query(f"select tail(c1,10,10),t1 from {dbname}.ct1") + + # tail with common col + tdSql.query(f"select tail(c1,10,10) ,ts from {dbname}.ct1") + tdSql.query(f"select tail(c1,10,10) ,c1 from {dbname}.ct1") - # tail with common col - tdSql.query("select tail(c1,10,10) ,ts from ct1") - tdSql.query("select tail(c1,10,10) ,c1 from ct1") + # tail with scalar function + tdSql.query(f"select tail(c1,10,10) ,abs(c1) from {dbname}.ct1") + tdSql.error(f"select tail(c1,10,10) , tail(c2,10,10) from {dbname}.ct1") + tdSql.query(f"select tail(c1,10,10) , abs(c2)+2 from {dbname}.ct1") - # tail with scalar function - tdSql.query("select tail(c1,10,10) ,abs(c1) from ct1") - tdSql.error("select tail(c1,10,10) , tail(c2,10,10) from ct1") - tdSql.query("select tail(c1,10,10) , abs(c2)+2 from ct1") - # bug need fix for scalar value or compute again - # tdSql.error(" select tail(c1,10,10) , 123 from ct1") - # tdSql.error(" select abs(tail(c1,10,10)) from ct1") - # tdSql.error(" select abs(tail(c1,10,10)) + 2 from ct1") + # tdSql.error(f"select tail(c1,10,10) , 123 from {dbname}.ct1") + # tdSql.error(f"select abs(tail(c1,10,10)) from {dbname}.ct1") + # tdSql.error(f"select abs(tail(c1,10,10)) + 2 from {dbname}.ct1") - # tail with aggregate function - tdSql.error("select tail(c1,10,10) ,sum(c1) from ct1") - tdSql.error("select tail(c1,10,10) ,max(c1) from ct1") - tdSql.error("select tail(c1,10,10) ,csum(c1) from ct1") - tdSql.error("select tail(c1,10,10) ,count(c1) from ct1") + # tail with aggregate function + tdSql.error(f"select tail(c1,10,10) ,sum(c1) from {dbname}.ct1") + tdSql.error(f"select tail(c1,10,10) ,max(c1) from {dbname}.ct1") + tdSql.error(f"select tail(c1,10,10) ,csum(c1) from {dbname}.ct1") + tdSql.error(f"select tail(c1,10,10) ,count(c1) from {dbname}.ct1") # tail with filter where - tdSql.query("select tail(c1,3,1) from ct4 where c1 is null") + tdSql.query(f"select tail(c1,3,1) from {dbname}.ct4 where c1 is null") tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, None) - tdSql.query("select tail(c1,3,2) from ct4 where c1 >2 order by 1") + tdSql.query(f"select tail(c1,3,2) from {dbname}.ct4 where c1 >2 order by 1") tdSql.checkData(0, 0, 5) tdSql.checkData(1, 0, 6) tdSql.checkData(2, 0, 7) - tdSql.query("select tail(c1,2,1) from ct4 where c2 between 0 and 99999 order by 1") + tdSql.query(f"select tail(c1,2,1) from {dbname}.ct4 where c2 between 0 and 99999 order by 1") tdSql.checkData(0, 0, 1) tdSql.checkData(1, 0, 2) - # tail with union all - tdSql.query("select tail(c1,2,1) from ct4 union all select c1 from ct1") + # tail with union all + tdSql.query(f"select tail(c1,2,1) from {dbname}.ct4 union all select c1 from {dbname}.ct1") tdSql.checkRows(15) - tdSql.query("select tail(c1,2,1) from ct4 union all select c1 from ct2 order by 1") + tdSql.query(f"select tail(c1,2,1) from {dbname}.ct4 union all select c1 from {dbname}.ct2 order by 1") tdSql.checkRows(2) tdSql.checkData(0, 0, 0) tdSql.checkData(1, 0, 1) - tdSql.query("select tail(c2,2,1) from ct4 union all select abs(c2)/2 from ct4") + tdSql.query(f"select tail(c2,2,1) from {dbname}.ct4 union all select abs(c2)/2 from {dbname}.ct4") tdSql.checkRows(14) - # tail with join - # prepare join datas with same ts + # tail with join + # prepare join datas with same ts - tdSql.execute(" use db ") - tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)") - tdSql.execute(" create table tb1 using st1 tags(1)") - tdSql.execute(" create table tb2 using st1 tags(2)") + tdSql.execute(f" create stable {dbname}.st1 (ts timestamp , num int) tags(ind int)") + tdSql.execute(f" create table {dbname}.tb1 using {dbname}.st1 tags(1)") + tdSql.execute(f" create table {dbname}.tb2 using {dbname}.st1 tags(2)") - tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)") - tdSql.execute(" create table ttb1 using st2 tags(1)") - tdSql.execute(" create table ttb2 using st2 tags(2)") + tdSql.execute(f" create stable {dbname}.st2 (ts timestamp , num int) tags(ind int)") + tdSql.execute(f" create table {dbname}.ttb1 using {dbname}.st2 tags(1)") + tdSql.execute(f" create table {dbname}.ttb2 using {dbname}.st2 tags(2)") start_ts = 1622369635000 # 2021-05-30 18:13:55 for i in range(10): ts_value = start_ts+i*1000 - tdSql.execute(f" insert into tb1 values({ts_value} , {i})") - tdSql.execute(f" insert into tb2 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.tb1 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.tb2 values({ts_value} , {i})") - tdSql.execute(f" insert into ttb1 values({ts_value} , {i})") - tdSql.execute(f" insert into ttb2 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.ttb1 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.ttb2 values({ts_value} , {i})") - tdSql.query("select tail(tb2.num,3,2) from tb1, tb2 where tb1.ts=tb2.ts order by 1 desc") + tdSql.query(f"select tail(tb2.num,3,2) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts order by 1 desc") tdSql.checkRows(3) tdSql.checkData(0,0,7) tdSql.checkData(1,0,6) tdSql.checkData(2,0,5) # nest query - # tdSql.query("select tail(c1,2) from (select _rowts , c1 from ct1)") - tdSql.query("select c1 from (select tail(c1,2) c1 from ct4) order by 1 nulls first") + # tdSql.query(f"select tail(c1,2) from (select _rowts , c1 from {dbname}.ct1)") + tdSql.query(f"select c1 from (select tail(c1,2) c1 from {dbname}.ct4) order by 1 nulls first") tdSql.checkRows(2) tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, 0) - tdSql.query("select sum(c1) from (select tail(c1,2) c1 from ct1)") + tdSql.query(f"select sum(c1) from (select tail(c1,2) c1 from {dbname}.ct1)") tdSql.checkRows(1) tdSql.checkData(0, 0, 18) - tdSql.query("select abs(c1) from (select tail(c1,2) c1 from ct1)") + tdSql.query(f"select abs(c1) from (select tail(c1,2) c1 from {dbname}.ct1)") tdSql.checkRows(2) tdSql.checkData(0, 0, 9) - + #partition by tbname - tdSql.query(" select tail(c1,5) from stb1 partition by tbname ") + tdSql.query(f"select tail(c1,5) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(10) - tdSql.query(" select tail(c1,3) from stb1 partition by tbname ") + tdSql.query(f"select tail(c1,3) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(6) - - # group by - tdSql.error("select tail(c1,2) from ct1 group by c1") - tdSql.error("select tail(c1,2) from ct1 group by tbname") + + # group by + tdSql.error(f"select tail(c1,2) from {dbname}.ct1 group by c1") + tdSql.error(f"select tail(c1,2) from {dbname}.ct1 group by tbname") # super table - tdSql.error("select tbname , tail(c1,2) from stb1 group by tbname") - tdSql.query("select tail(c1,2) from stb1 partition by tbname") + tdSql.error(f"select tbname , tail(c1,2) from {dbname}.stb1 group by tbname") + tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname") tdSql.checkRows(4) - # bug need fix - # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname") + # bug need fix + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname") # tdSql.checkRows(4) - # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname order by tbname") + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname order by tbname") # tdSql.checkRows(4) - # tdSql.query(" select tbname , count(c1) from stb1 partition by tbname order by tbname ") + # tdSql.query(f"select tbname , count(c1) from {dbname}.stb1 partition by tbname order by tbname ") # tdSql.checkRows(2) - # tdSql.query(" select tbname , max(c1) ,c1 from stb1 partition by tbname order by tbname ") + # tdSql.query(f"select tbname , max(c1) ,c1 from {dbname}.stb1 partition by tbname order by tbname ") # tdSql.checkRows(2) - # tdSql.query(" select tbname ,first(c1) from stb1 partition by tbname order by tbname ") + # tdSql.query(f"select tbname ,first(c1) from {dbname}.stb1 partition by tbname order by tbname ") # tdSql.checkRows(2) - tdSql.query("select tail(c1,2) from stb1 partition by tbname") + tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname") tdSql.checkRows(4) - # # bug need fix - # tdSql.query(" select tbname , tail(c1,2) from stb1 where t1 = 0 partition by tbname ") + # # bug need fix + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where t1 = 0 partition by tbname ") # tdSql.checkRows(2) - # tdSql.query(" select tbname , tail(c1,2) from stb1 where t1 = 0 partition by tbname order by tbname ") + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where t1 = 0 partition by tbname order by tbname ") # tdSql.checkRows(2) - # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname order by tbname ") + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where c1 = 0 partition by tbname order by tbname ") # tdSql.checkRows(3) - # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname ") + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where c1 = 0 partition by tbname ") # tdSql.checkRows(3) - # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname ") + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where c1 = 0 partition by tbname ") # tdSql.checkRows(3) - tdSql.query(" select tail(t1,2) from stb1 ") + tdSql.query(f"select tail(t1,2) from {dbname}.stb1 ") tdSql.checkRows(2) - tdSql.query(" select tail(t1+c1,2) from stb1 ") + tdSql.query(f"select tail(t1+c1,2) from {dbname}.stb1 ") tdSql.checkRows(2) - tdSql.query(" select tail(t1+c1,2) from stb1 partition by tbname ") + tdSql.query(f"select tail(t1+c1,2) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(4) - tdSql.query(" select tail(t1,2) from stb1 partition by tbname ") + tdSql.query(f"select tail(t1,2) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(4) - # nest query - tdSql.query(" select tail(c1,2) from (select _rowts , t1 ,c1 , tbname from stb1 ) ") + # nest query + tdSql.query(f"select tail(c1,2) from (select _rowts , t1 ,c1 , tbname from {dbname}.stb1 ) ") tdSql.checkRows(2) tdSql.checkData(0,0,None) tdSql.checkData(1,0,9) - tdSql.query("select tail(t1,2) from (select _rowts , t1 , tbname from stb1 )") + tdSql.query(f"select tail(t1,2) from (select _rowts , t1 , tbname from {dbname}.stb1 )") tdSql.checkRows(2) tdSql.checkData(0,0,4) tdSql.checkData(1,0,1) - def check_boundary_values(self): + def check_boundary_values(self, dbname="bound_test"): - tdSql.execute("drop database if exists bound_test") - tdSql.execute("create database if not exists bound_test") - tdSql.execute("use bound_test") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname}") tdSql.execute( - "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" ) - tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( - f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - + tdSql.error( - f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - - tdSql.query("select tail(c2,2) from sub1_bound order by 1 desc") + + tdSql.query(f"select tail(c2,2) from {dbname}.sub1_bound order by 1 desc") tdSql.checkRows(2) tdSql.checkData(0,0,9223372036854775803) @@ -456,22 +450,22 @@ class TDTestCase: tdSql.prepare() tdLog.printNoPrefix("==========step1:create table ==============") - + self.prepare_datas() - tdLog.printNoPrefix("==========step2:test errors ==============") + tdLog.printNoPrefix("==========step2:test errors ==============") self.test_errors() - - tdLog.printNoPrefix("==========step3:support types ============") + + tdLog.printNoPrefix("==========step3:support types ============") self.support_types() - tdLog.printNoPrefix("==========step4: tail basic query ============") + tdLog.printNoPrefix("==========step4: tail basic query ============") self.basic_tail_function() - tdLog.printNoPrefix("==========step5: tail boundary query ============") + tdLog.printNoPrefix("==========step5: tail boundary query ============") self.check_boundary_values() diff --git a/tests/system-test/2-query/tan.py b/tests/system-test/2-query/tan.py index da47c1c2b2560bf617681df10e8788f518b11ac1..683cee37ff7c81ca45b628852134ddbab6e342cf 100644 --- a/tests/system-test/2-query/tan.py +++ b/tests/system-test/2-query/tan.py @@ -9,48 +9,46 @@ from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} + def init(self, conn, powSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - - def prepare_datas(self): + + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) - + tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -61,18 +59,18 @@ class TDTestCase: ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) - ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999, -999, -99, -9.99, -99999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ''' ) - + def check_result_auto_tan(self ,origin_query , pow_query): pow_result = tdSql.getResult(pow_query) origin_result = tdSql.getResult(origin_query) auto_result =[] - + for row in origin_result: row_check = [] for elem in row: @@ -82,190 +80,178 @@ class TDTestCase: elem = math.tan(elem) row_check.append(elem) auto_result.append(row_check) - - check_status = True - + tdSql.query(pow_query) for row_index , row in enumerate(pow_result): for col_index , elem in enumerate(row): - if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None): - check_status = False - elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001): - check_status = False - else: - pass - if not check_status: - tdLog.notice("tan function value has not as expected , sql is \"%s\" "%pow_query ) - sys.exit(1) - else: - tdLog.info("tan value check pass , it work as expected ,sql is \"%s\" "%pow_query ) - - def test_errors(self): + tdSql.checkData(row_index , col_index ,auto_result[row_index][col_index] ) + + def test_errors(self, dbname="db"): error_sql_lists = [ - "select tan from t1", - # "select tan(-+--+c1 ) from t1", - # "select +-tan(c1) from t1", - # "select ++-tan(c1) from t1", - # "select ++--tan(c1) from t1", - # "select - -tan(c1)*0 from t1", - # "select tan(tbname+1) from t1 ", - "select tan(123--123)==1 from t1", - "select tan(c1) as 'd1' from t1", - "select tan(c1 ,c2) from t1", - "select tan(c1 ,NULL ) from t1", - "select tan(,) from t1;", - "select tan(tan(c1) ab from t1)", - "select tan(c1 ) as int from t1", - "select tan from stb1", - # "select tan(-+--+c1) from stb1", - # "select +-tan(c1) from stb1", - # "select ++-tan(c1) from stb1", - # "select ++--tan(c1) from stb1", - # "select - -tan(c1)*0 from stb1", - # "select tan(tbname+1) from stb1 ", - "select tan(123--123)==1 from stb1", - "select tan(c1) as 'd1' from stb1", - "select tan(c1 ,c2 ) from stb1", - "select tan(c1 ,NULL) from stb1", - "select tan(,) from stb1;", - "select tan(tan(c1) ab from stb1)", - "select tan(c1) as int from stb1" + f"select tan from {dbname}.t1", + # f"select tan(-+--+c1 ) from {dbname}.t1", + # f"select +-tan(c1) from {dbname}.t1", + # f"select ++-tan(c1) from {dbname}.t1", + # f"select ++--tan(c1) from {dbname}.t1", + # f"select - -tan(c1)*0 from {dbname}.t1", + # f"select tan(tbname+1) from {dbname}.t1 ", + f"select tan(123--123)==1 from {dbname}.t1", + f"select tan(c1) as 'd1' from {dbname}.t1", + f"select tan(c1 ,c2) from {dbname}.t1", + f"select tan(c1 ,NULL ) from {dbname}.t1", + f"select tan(,) from {dbname}.t1;", + f"select tan(tan(c1) ab from {dbname}.t1)", + f"select tan(c1 ) as int from {dbname}.t1", + f"select tan from {dbname}.stb1", + # f"select tan(-+--+c1) from {dbname}.stb1", + # f"select +-tan(c1) from {dbname}.stb1", + # f"select ++-tan(c1) from {dbname}.stb1", + # f"select ++--tan(c1) from {dbname}.stb1", + # f"select - -tan(c1)*0 from {dbname}.stb1", + # f"select tan(tbname+1) from {dbname}.stb1 ", + f"select tan(123--123)==1 from {dbname}.stb1", + f"select tan(c1) as 'd1' from {dbname}.stb1", + f"select tan(c1 ,c2 ) from {dbname}.stb1", + f"select tan(c1 ,NULL) from {dbname}.stb1", + f"select tan(,) from {dbname}.stb1;", + f"select tan(tan(c1) ab from {dbname}.stb1)", + f"select tan(c1) as int from {dbname}.stb1" ] for error_sql in error_sql_lists: tdSql.error(error_sql) - - def support_types(self): + + def support_types(self, dbname="db"): type_error_sql_lists = [ - "select tan(ts) from t1" , - "select tan(c7) from t1", - "select tan(c8) from t1", - "select tan(c9) from t1", - "select tan(ts) from ct1" , - "select tan(c7) from ct1", - "select tan(c8) from ct1", - "select tan(c9) from ct1", - "select tan(ts) from ct3" , - "select tan(c7) from ct3", - "select tan(c8) from ct3", - "select tan(c9) from ct3", - "select tan(ts) from ct4" , - "select tan(c7) from ct4", - "select tan(c8) from ct4", - "select tan(c9) from ct4", - "select tan(ts) from stb1" , - "select tan(c7) from stb1", - "select tan(c8) from stb1", - "select tan(c9) from stb1" , - - "select tan(ts) from stbbb1" , - "select tan(c7) from stbbb1", - - "select tan(ts) from tbname", - "select tan(c9) from tbname" + f"select tan(ts) from {dbname}.t1" , + f"select tan(c7) from {dbname}.t1", + f"select tan(c8) from {dbname}.t1", + f"select tan(c9) from {dbname}.t1", + f"select tan(ts) from {dbname}.ct1" , + f"select tan(c7) from {dbname}.ct1", + f"select tan(c8) from {dbname}.ct1", + f"select tan(c9) from {dbname}.ct1", + f"select tan(ts) from {dbname}.ct3" , + f"select tan(c7) from {dbname}.ct3", + f"select tan(c8) from {dbname}.ct3", + f"select tan(c9) from {dbname}.ct3", + f"select tan(ts) from {dbname}.ct4" , + f"select tan(c7) from {dbname}.ct4", + f"select tan(c8) from {dbname}.ct4", + f"select tan(c9) from {dbname}.ct4", + f"select tan(ts) from {dbname}.stb1" , + f"select tan(c7) from {dbname}.stb1", + f"select tan(c8) from {dbname}.stb1", + f"select tan(c9) from {dbname}.stb1" , + + f"select tan(ts) from {dbname}.stbbb1" , + f"select tan(c7) from {dbname}.stbbb1", + + f"select tan(ts) from {dbname}.tbname", + f"select tan(c9) from {dbname}.tbname" ] - + for type_sql in type_error_sql_lists: tdSql.error(type_sql) - - + + type_sql_lists = [ - "select tan(c1) from t1", - "select tan(c2) from t1", - "select tan(c3) from t1", - "select tan(c4) from t1", - "select tan(c5) from t1", - "select tan(c6) from t1", - - "select tan(c1) from ct1", - "select tan(c2) from ct1", - "select tan(c3) from ct1", - "select tan(c4) from ct1", - "select tan(c5) from ct1", - "select tan(c6) from ct1", - - "select tan(c1) from ct3", - "select tan(c2) from ct3", - "select tan(c3) from ct3", - "select tan(c4) from ct3", - "select tan(c5) from ct3", - "select tan(c6) from ct3", - - "select tan(c1) from stb1", - "select tan(c2) from stb1", - "select tan(c3) from stb1", - "select tan(c4) from stb1", - "select tan(c5) from stb1", - "select tan(c6) from stb1", - - "select tan(c6) as alisb from stb1", - "select tan(c6) alisb from stb1", + f"select tan(c1) from {dbname}.t1", + f"select tan(c2) from {dbname}.t1", + f"select tan(c3) from {dbname}.t1", + f"select tan(c4) from {dbname}.t1", + f"select tan(c5) from {dbname}.t1", + f"select tan(c6) from {dbname}.t1", + + f"select tan(c1) from {dbname}.ct1", + f"select tan(c2) from {dbname}.ct1", + f"select tan(c3) from {dbname}.ct1", + f"select tan(c4) from {dbname}.ct1", + f"select tan(c5) from {dbname}.ct1", + f"select tan(c6) from {dbname}.ct1", + + f"select tan(c1) from {dbname}.ct3", + f"select tan(c2) from {dbname}.ct3", + f"select tan(c3) from {dbname}.ct3", + f"select tan(c4) from {dbname}.ct3", + f"select tan(c5) from {dbname}.ct3", + f"select tan(c6) from {dbname}.ct3", + + f"select tan(c1) from {dbname}.stb1", + f"select tan(c2) from {dbname}.stb1", + f"select tan(c3) from {dbname}.stb1", + f"select tan(c4) from {dbname}.stb1", + f"select tan(c5) from {dbname}.stb1", + f"select tan(c6) from {dbname}.stb1", + + f"select tan(c6) as alisb from {dbname}.stb1", + f"select tan(c6) alisb from {dbname}.stb1", ] for type_sql in type_sql_lists: tdSql.query(type_sql) - - def basic_tan_function(self): - # basic query - tdSql.query("select c1 from ct3") + def basic_tan_function(self, dbname="db"): + + # basic query + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select tan(c1) from ct3") + tdSql.query(f"select tan(c1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tan(c2) from ct3") + tdSql.query(f"select tan(c2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tan(c3) from ct3") + tdSql.query(f"select tan(c3) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tan(c4) from ct3") + tdSql.query(f"select tan(c4) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tan(c5) from ct3") + tdSql.query(f"select tan(c5) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select tan(c6) from ct3") + tdSql.query(f"select tan(c6) from {dbname}.ct3") tdSql.checkRows(0) # # used for regular table - tdSql.query("select tan(c1) from t1") + tdSql.query(f"select tan(c1) from {dbname}.t1") tdSql.checkData(0, 0, None) tdSql.checkData(1 , 0, 1.557407725) tdSql.checkData(3 , 0, -0.142546543) tdSql.checkData(5 , 0, None) - tdSql.query("select c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 4, 1.11000) tdSql.checkData(3, 3, 33) tdSql.checkData(5, 4, None) - tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1") + tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1") tdSql.checkData(1, 5, 1.11000) tdSql.checkData(3, 4, 33) tdSql.checkData(5, 5, None) - self.check_result_auto_tan( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)), tan(abs(c5)) from t1") - + self.check_result_auto_tan( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.t1", f"select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)) from {dbname}.t1") + # used for sub table - tdSql.query("select c2 ,tan(c2) from ct1") + tdSql.query(f"select c2 ,tan(c2) from {dbname}.ct1") tdSql.checkData(0, 1, -0.226288661) tdSql.checkData(1 , 1, 0.670533806) tdSql.checkData(3 , 1, -1.325559275) tdSql.checkData(4 , 1, 0.000000000) - tdSql.query("select c1, c5 ,tan(c5) from ct4") + tdSql.query(f"select c1, c5 ,tan(c5) from {dbname}.ct4") tdSql.checkData(0 , 2, None) tdSql.checkData(1 , 2, -0.605942929) tdSql.checkData(2 , 2, 11.879355609) tdSql.checkData(3 , 2, 0.395723765) tdSql.checkData(5 , 2, None) - self.check_result_auto_tan( "select c1, c2, c3 , c4, c5 from ct1", "select tan(c1), tan(c2) ,tan(c3), tan(c4), tan(c5) from ct1") - + self.check_result_auto_tan( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select tan(c1), tan(c2) ,tan(c3), tan(c4), tan(c5) from {dbname}.ct1") + # nest query for tan functions - tdSql.query("select c4 , tan(c4) ,tan(tan(c4)) , tan(tan(tan(c4))) from ct1;") + tdSql.query(f"select c4 , tan(c4) ,tan(tan(c4)) , tan(tan(tan(c4))) from {dbname}.ct1;") tdSql.checkData(0 , 0 , 88) tdSql.checkData(0 , 1 , 0.035420501) tdSql.checkData(0 , 2 , 0.035435322) @@ -281,52 +267,52 @@ class TDTestCase: tdSql.checkData(11 , 2 , -0.040227928) tdSql.checkData(11 , 3 , -0.040249642) - # used for stable table - - tdSql.query("select tan(c1) from stb1") + # used for stable table + + tdSql.query(f"select tan(c1) from {dbname}.stb1") tdSql.checkRows(25) - + # used for not exists table - tdSql.error("select tan(c1) from stbbb1") - tdSql.error("select tan(c1) from tbname") - tdSql.error("select tan(c1) from ct5") + tdSql.error(f"select tan(c1) from {dbname}.stbbb1") + tdSql.error(f"select tan(c1) from {dbname}.tbname") + tdSql.error(f"select tan(c1) from {dbname}.ct5") + + # mix with common col + tdSql.query(f"select c1, tan(c1) from {dbname}.ct1") + tdSql.query(f"select c2, tan(c2) from {dbname}.ct4") - # mix with common col - tdSql.query("select c1, tan(c1) from ct1") - tdSql.query("select c2, tan(c2) from ct4") - # mix with common functions - tdSql.query("select c1, tan(c1),tan(c1), tan(tan(c1)) from ct4 ") + tdSql.query(f"select c1, tan(c1),tan(c1), tan(tan(c1)) from {dbname}.ct4 ") tdSql.checkData(0 , 0 ,None) tdSql.checkData(0 , 1 ,None) tdSql.checkData(0 , 2 ,None) tdSql.checkData(0 , 3 ,None) - + tdSql.checkData(3 , 0 , 6) tdSql.checkData(3 , 1 ,-0.291006191) tdSql.checkData(3 , 2 ,-0.291006191) tdSql.checkData(3 , 3 ,-0.299508909) - tdSql.query("select c1, tan(c1),c5, floor(c5) from stb1 ") + tdSql.query(f"select c1, tan(c1),c5, floor(c5) from {dbname}.stb1 ") # # mix with agg functions , not support - tdSql.error("select c1, tan(c1),c5, count(c5) from stb1 ") - tdSql.error("select c1, tan(c1),c5, count(c5) from ct1 ") - tdSql.error("select tan(c1), count(c5) from stb1 ") - tdSql.error("select tan(c1), count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from ct1 ") - tdSql.error("select c1, count(c5) from stb1 ") + tdSql.error(f"select c1, tan(c1),c5, count(c5) from {dbname}.stb1 ") + tdSql.error(f"select c1, tan(c1),c5, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select tan(c1), count(c5) from {dbname}.stb1 ") + tdSql.error(f"select tan(c1), count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ") + tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ") # agg functions mix with agg functions - tdSql.query("select max(c5), count(c5) from stb1") - tdSql.query("select max(c5), count(c5) from ct1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1") + tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1") + - - # # bug fix for compute - tdSql.query("select c1, tan(c1) -0 ,tan(c1-4)-0 from ct4 ") + # # bug fix for compute + tdSql.query(f"select c1, tan(c1) -0 ,tan(c1-4)-0 from {dbname}.ct4 ") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -334,7 +320,7 @@ class TDTestCase: tdSql.checkData(1, 1, -6.799711455) tdSql.checkData(1, 2, 1.157821282) - tdSql.query(" select c1, tan(c1) -0 ,tan(c1-0.1)-0.1 from ct4") + tdSql.query(f"select c1, tan(c1) -0 ,tan(c1-0.1)-0.1 from {dbname}.ct4") tdSql.checkData(0, 0, None) tdSql.checkData(0, 1, None) tdSql.checkData(0, 2, None) @@ -342,35 +328,33 @@ class TDTestCase: tdSql.checkData(1, 1, -6.799711455) tdSql.checkData(1, 2, -21.815112681) - tdSql.query("select c1, tan(c1), c2, tan(c2), c3, tan(c3) from ct1") + tdSql.query(f"select c1, tan(c1), c2, tan(c2), c3, tan(c3) from {dbname}.ct1") - def test_big_number(self): + def test_big_number(self, dbname="db"): - tdSql.query("select c1, tan(100000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, tan(100000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(4, 1, math.tan(100000000)) - - tdSql.query("select c1, tan(10000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, tan(10000000000000) from {dbname}.ct1") # bigint to double data overflow tdSql.checkData(4, 1, math.tan(10000000000000)) - tdSql.query("select c1, tan(10000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, tan(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, tan(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, tan(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(1, 1, math.tan(10000000000000000000000000.0)) - tdSql.query("select c1, tan(10000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, tan(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, tan(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, tan(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(4, 1, math.tan(10000000000000000000000000000000000.0)) - tdSql.query("select c1, tan(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow - tdSql.query("select c1, tan(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value + tdSql.query(f"select c1, tan(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow + tdSql.query(f"select c1, tan(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value tdSql.checkData(4, 1, math.tan(10000000000000000000000000000000000000000.0)) - tdSql.query("select c1, tan(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow + tdSql.query(f"select c1, tan(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow - def abs_func_filter(self): - tdSql.execute("use db") - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from ct4 where c1>5 ") + def abs_func_filter(self, dbname="db"): + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from {dbname}.ct4 where c1>5 ") tdSql.checkRows(3) tdSql.checkData(0,0,8) tdSql.checkData(0,1,8.000000000) @@ -378,7 +362,7 @@ class TDTestCase: tdSql.checkData(0,3,7.900000000) tdSql.checkData(0,4,-7.000000000) - tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from ct4 where c1=5 ") + tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from {dbname}.ct4 where c1=5 ") tdSql.checkRows(1) tdSql.checkData(0,0,5) tdSql.checkData(0,1,5.000000000) @@ -386,7 +370,7 @@ class TDTestCase: tdSql.checkData(0,3,4.900000000) tdSql.checkData(0,4,-3.000000000) - tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from ct4 where c1>tan(c1) limit 1 ") + tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from {dbname}.ct4 where c1>tan(c1) limit 1 ") tdSql.checkRows(1) tdSql.checkData(0,0,8) tdSql.checkData(0,1,88888) @@ -394,45 +378,40 @@ class TDTestCase: tdSql.checkData(0,3,8.000000000) tdSql.checkData(0,4,7.900000000) tdSql.checkData(0,5,-7.000000000) - - def pow_Arithmetic(self): - pass - - def check_boundary_values(self): + + def check_boundary_values(self, dbname="bound_test"): PI=3.1415926 - tdSql.execute("drop database if exists bound_test") - tdSql.execute("create database if not exists bound_test") - time.sleep(3) - tdSql.execute("use bound_test") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname}") tdSql.execute( - "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" ) - tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( - f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.error( - f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - self.check_result_auto_tan( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)), tan(abs(c5)) from sub1_bound") - - self.check_result_auto_tan( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select tan(c1), tan(c2) ,tan(c3), tan(c3), tan(c2) ,tan(c1) from sub1_bound") + self.check_result_auto_tan( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.sub1_bound ", f"select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)) from {dbname}.sub1_bound") + + self.check_result_auto_tan( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select tan(c1), tan(c2) ,tan(c3), tan(c3), tan(c2) ,tan(c1) from {dbname}.sub1_bound") + + self.check_result_auto_tan(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select tan(abs(c1)) from {dbname}.sub1_bound" ) - self.check_result_auto_tan("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select tan(abs(c1)) from sub1_bound" ) - # check basic elem for table per row - tdSql.query("select tan(abs(c1)) ,tan(abs(c2)) , tan(abs(c3)) , tan(abs(c4)), tan(abs(c5)), tan(abs(c6)) from sub1_bound ") + tdSql.query(f"select tan(abs(c1)) ,tan(abs(c2)) , tan(abs(c3)) , tan(abs(c4)), tan(abs(c5)), tan(abs(c6)) from {dbname}.sub1_bound ") tdSql.checkData(0,0,math.tan(2147483647)) tdSql.checkData(0,1,math.tan(9223372036854775807)) tdSql.checkData(0,2,math.tan(32767)) @@ -450,76 +429,71 @@ class TDTestCase: tdSql.checkData(3,4,math.tan(339999995214436424907732413799364296704.00000)) # check + - * / in functions - tdSql.query("select tan(abs(c1+1)) ,tan(abs(c2)) , tan(abs(c3*1)) , tan(abs(c4/2)), tan(abs(c5))/2, tan(abs(c6)) from sub1_bound ") + tdSql.query(f"select tan(abs(c1+1)) ,tan(abs(c2)) , tan(abs(c3*1)) , tan(abs(c4/2)), tan(abs(c5))/2, tan(abs(c6)) from {dbname}.sub1_bound ") tdSql.checkData(0,0,math.tan(2147483648.000000000)) tdSql.checkData(0,1,math.tan(9223372036854775807)) tdSql.checkData(0,2,math.tan(32767.000000000)) tdSql.checkData(0,3,math.tan(63.500000000)) - tdSql.execute("create stable st (ts timestamp, num1 float, num2 double) tags (t1 int);") - tdSql.execute(f'create table tb1 using st tags (1)') - tdSql.execute(f'create table tb2 using st tags (2)') - tdSql.execute(f'create table tb3 using st tags (3)') - tdSql.execute('insert into tb1 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 )) - tdSql.execute('insert into tb1 values (now()-30s, {}, {})'.format(PI ,PI )) - tdSql.execute('insert into tb1 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5)) - tdSql.execute('insert into tb1 values (now()-10s, {}, {})'.format(PI*2 ,PI*2)) - tdSql.execute('insert into tb1 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5)) - - tdSql.execute('insert into tb2 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 )) - tdSql.execute('insert into tb2 values (now()-30s, {}, {})'.format(PI ,PI )) - tdSql.execute('insert into tb2 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5)) - tdSql.execute('insert into tb2 values (now()-10s, {}, {})'.format(PI*2 ,PI*2)) - tdSql.execute('insert into tb2 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5)) - - for i in range(100): - tdSql.execute('insert into tb3 values (now()+{}s, {}, {})'.format(i,PI*(5+i)/2 ,PI*(5+i)/2)) - - self.check_result_auto_tan("select num1,num2 from tb3;" , "select tan(num1),tan(num2) from tb3") - - def support_super_table_test(self): - tdSql.execute(" use db ") - self.check_result_auto_tan( " select c5 from stb1 order by ts " , "select tan(c5) from stb1 order by ts" ) - self.check_result_auto_tan( " select c5 from stb1 order by tbname " , "select tan(c5) from stb1 order by tbname" ) - self.check_result_auto_tan( " select c5 from stb1 where c1 > 0 order by tbname " , "select tan(c5) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_tan( " select c5 from stb1 where c1 > 0 order by tbname " , "select tan(c5) from stb1 where c1 > 0 order by tbname" ) - - self.check_result_auto_tan( " select t1,c5 from stb1 order by ts " , "select tan(t1), tan(c5) from stb1 order by ts" ) - self.check_result_auto_tan( " select t1,c5 from stb1 order by tbname " , "select tan(t1) ,tan(c5) from stb1 order by tbname" ) - self.check_result_auto_tan( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select tan(t1) ,tan(c5) from stb1 where c1 > 0 order by tbname" ) - self.check_result_auto_tan( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select tan(t1) , tan(c5) from stb1 where c1 > 0 order by tbname" ) + tdSql.execute(f"create stable {dbname}.st (ts timestamp, num1 float, num2 double) tags (t1 int);") + tdSql.execute(f'create table {dbname}.tb1 using {dbname}.st tags (1)') + tdSql.execute(f'create table {dbname}.tb2 using {dbname}.st tags (2)') + tdSql.execute(f'create table {dbname}.tb3 using {dbname}.st tags (3)') + tdSql.execute(f'insert into {dbname}.tb1 values (now()-40s, {PI/2}, {PI/2})') + tdSql.execute(f'insert into {dbname}.tb1 values (now()-30s, {PI}, {PI})') + tdSql.execute(f'insert into {dbname}.tb1 values (now()-20s, {PI*1.5}, {PI*1.5})') + tdSql.execute(f'insert into {dbname}.tb1 values (now()-10s, {PI*2}, {PI*2})') + tdSql.execute(f'insert into {dbname}.tb1 values (now(), {PI*2.5}, {PI*2.5})') + + tdSql.execute(f'insert into {dbname}.tb2 values (now()-40s, {PI/2}, {PI/2})') + tdSql.execute(f'insert into {dbname}.tb2 values (now()-30s, {PI}, {PI})') + tdSql.execute(f'insert into {dbname}.tb2 values (now()-20s, {PI*1.5}, {PI*1.5})') + tdSql.execute(f'insert into {dbname}.tb2 values (now()-10s, {PI*2}, {PI*2})') + tdSql.execute(f'insert into {dbname}.tb2 values (now(), {PI*2.5}, {PI*2.5})') + + self.check_result_auto_tan(f"select num1,num2 from {dbname}.tb3;" , f"select tan(num1),tan(num2) from {dbname}.tb3") + + def support_super_table_test(self, dbname="db"): + self.check_result_auto_tan( f"select c5 from {dbname}.stb1 order by ts " , f"select tan(c5) from {dbname}.stb1 order by ts" ) + self.check_result_auto_tan( f"select c5 from {dbname}.stb1 order by tbname " , f"select tan(c5) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_tan( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_tan( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + + self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select tan(t1), tan(c5) from {dbname}.stb1 order by ts" ) + self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select tan(t1) ,tan(c5) from {dbname}.stb1 order by tbname" ) + self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(t1) ,tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) + self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(t1) , tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" ) pass - - + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() tdLog.printNoPrefix("==========step1:create table ==============") - + self.prepare_datas() - tdLog.printNoPrefix("==========step2:test errors ==============") + tdLog.printNoPrefix("==========step2:test errors ==============") self.test_errors() - - tdLog.printNoPrefix("==========step3:support types ============") + + tdLog.printNoPrefix("==========step3:support types ============") self.support_types() - tdLog.printNoPrefix("==========step4: tan basic query ============") + tdLog.printNoPrefix("==========step4: tan basic query ============") self.basic_tan_function() - tdLog.printNoPrefix("==========step5: big number tan query ============") + tdLog.printNoPrefix("==========step5: big number tan query ============") self.test_big_number() - - tdLog.printNoPrefix("==========step6: tan boundary query ============") + tdLog.printNoPrefix("==========step6: tan boundary query ============") self.check_boundary_values() - tdLog.printNoPrefix("==========step7: tan filter query ============") + tdLog.printNoPrefix("==========step7: tan filter query ============") self.abs_func_filter() diff --git a/tests/system-test/2-query/timetruncate.py b/tests/system-test/2-query/timetruncate.py index 3551d8ee2cfb0669c23ed1754ebcb65c69e48daa..d773114c3c3d84bb6b102852d84223d68e0c0a2f 100644 --- a/tests/system-test/2-query/timetruncate.py +++ b/tests/system-test/2-query/timetruncate.py @@ -25,6 +25,7 @@ class TDTestCase: self.ntbname = f'{self.dbname}.ntb' self.stbname = f'{self.dbname}.stb' self.ctbname = f'{self.dbname}.ctb' + def check_ms_timestamp(self,unit,date_time): if unit.lower() == '1a': for i in range(len(self.ts_str)): @@ -45,11 +46,12 @@ class TDTestCase: elif unit.lower() == '1d': for i in range(len(self.ts_str)): ts_result = self.get_time.get_ms_timestamp(str(tdSql.queryResult[i][0])) - tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24)*24*60*60*1000) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24)*24*60*60*1000) elif unit.lower() == '1w': for i in range(len(self.ts_str)): ts_result = self.get_time.get_ms_timestamp(str(tdSql.queryResult[i][0])) tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24/7)*7*24*60*60*1000) + def check_us_timestamp(self,unit,date_time): if unit.lower() == '1u': for i in range(len(self.ts_str)): @@ -74,11 +76,12 @@ class TDTestCase: elif unit.lower() == '1d': for i in range(len(self.ts_str)): ts_result = self.get_time.get_us_timestamp(str(tdSql.queryResult[i][0])) - tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24)*24*60*60*1000*1000 ) + tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24)*24*60*60*1000*1000 ) elif unit.lower() == '1w': for i in range(len(self.ts_str)): ts_result = self.get_time.get_us_timestamp(str(tdSql.queryResult[i][0])) tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24/7)*7*24*60*60*1000*1000) + def check_ns_timestamp(self,unit,date_time): if unit.lower() == '1b': for i in range(len(self.ts_str)): @@ -100,21 +103,23 @@ class TDTestCase: tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60)*60*60*1000*1000*1000 ) elif unit.lower() == '1d': for i in range(len(self.ts_str)): - tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60/24)*24*60*60*1000*1000*1000 ) + tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60/24)*24*60*60*1000*1000*1000 ) elif unit.lower() == '1w': for i in range(len(self.ts_str)): tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60/24/7)*7*24*60*60*1000*1000*1000) + def check_tb_type(self,unit,tb_type): - if tb_type.lower() == 'ntb': + if tb_type.lower() == 'ntb': tdSql.query(f'select timetruncate(ts,{unit}) from {self.ntbname}') elif tb_type.lower() == 'ctb': tdSql.query(f'select timetruncate(ts,{unit}) from {self.ctbname}') elif tb_type.lower() == 'stb': tdSql.query(f'select timetruncate(ts,{unit}) from {self.stbname}') + def data_check(self,date_time,precision,tb_type): for unit in self.time_unit: if (unit.lower() == '1u' and precision.lower() == 'ms') or (unit.lower() == '1b' and precision.lower() == 'us') or (unit.lower() == '1b' and precision.lower() == 'ms'): - if tb_type.lower() == 'ntb': + if tb_type.lower() == 'ntb': tdSql.error(f'select timetruncate(ts,{unit}) from {self.ntbname}') elif tb_type.lower() == 'ctb': tdSql.error(f'select timetruncate(ts,{unit}) from {self.ctbname}') @@ -139,16 +144,19 @@ class TDTestCase: tdSql.error(f'select timetruncate(ts,{unit}) from {self.ctbname}') elif tb_type.lower() == 'stb': tdSql.error(f'select timetruncate(ts,{unit}) from {self.stbname}') + def function_check_ntb(self): for precision in self.db_param_precision: tdSql.execute(f'drop database if exists {self.dbname}') tdSql.execute(f'create database {self.dbname} precision "{precision}"') + tdLog.info(f"=====now is in a {precision} database=====") tdSql.execute(f'use {self.dbname}') tdSql.execute(f'create table {self.ntbname} (ts timestamp,c0 int)') for ts in self.ts_str: tdSql.execute(f'insert into {self.ntbname} values("{ts}",1)') date_time = self.get_time.time_transform(self.ts_str,precision) self.data_check(date_time,precision,'ntb') + def function_check_stb(self): for precision in self.db_param_precision: tdSql.execute(f'drop database if exists {self.dbname}') @@ -161,9 +169,11 @@ class TDTestCase: date_time = self.get_time.time_transform(self.ts_str,precision) self.data_check(date_time,precision,'ctb') self.data_check(date_time,precision,'stb') + def run(self): self.function_check_ntb() self.function_check_stb() + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/2-query/tsbsQuery.py b/tests/system-test/2-query/tsbsQuery.py index 617f7e74643c9b1dbb24834e3535b4bac669e4bb..04a80a74ad2d6ec21a97dc17bba05fb02df3830b 100644 --- a/tests/system-test/2-query/tsbsQuery.py +++ b/tests/system-test/2-query/tsbsQuery.py @@ -22,7 +22,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor(), True) + tdSql.init(conn.cursor(), False) def create_ctable(self,tsql=None, dbName='db',stbName='stb',ctbPrefix='ctb',ctbNum=1): tsql.execute("use %s" %dbName) @@ -32,16 +32,16 @@ class TDTestCase: for i in range(ctbNum): tagValue = 'beijing' if (i % 10 == 0): - sql += " %s%d using %s (name,fleet,driver,device_version,load_capacity,fuel_capacity,nominal_fuel_consumption) tags('truck_%d', 'South%d','Trish%d','v2.%d', 1500+%d*20, 150+%d*2, 5+%d)"%(ctbPrefix,i,stbName,i,i,i,i,(1500+i*20),(150+i*2),(5+i)) + sql += f" {dbName}.%s%d using %s (name,fleet,driver,device_version,load_capacity,fuel_capacity,nominal_fuel_consumption) tags('truck_%d', 'South%d','Trish%d','v2.%d', 1500+%d*20, 150+%d*2, 5+%d)"%(ctbPrefix,i,stbName,i,i,i,i,(1500+i*20),(150+i*2),(5+i)) else: model = 'H-%d'%i - sql += " %s%d using %s tags('truck_%d', 'South%d','Trish%d','%s','v2.%d', %d, %d,%d)"%(ctbPrefix,i,stbName,i,i,i,model,i,(1500+i*20),(150+i*2),(5+i)) + sql += f" {dbName}.%s%d using %s tags('truck_%d', 'South%d','Trish%d','%s','v2.%d', %d, %d,%d)"%(ctbPrefix,i,stbName,i,i,i,model,i,(1500+i*20),(150+i*2),(5+i)) if (i > 0) and (i%1000 == 0): tsql.execute(sql) sql = pre_create if sql != pre_create: tsql.execute(sql) - + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) return @@ -54,32 +54,32 @@ class TDTestCase: startTs = int(round(t * 1000)) for i in range(ctbNum): - sql += " %s%d values "%(ctbPrefix,i) + sql += f" {dbName}.%s%d values "%(ctbPrefix,i) for j in range(rowsPerTbl): if(ctbPrefix=="rct"): sql += f"({startTs+j*60000}, {80+j}, {90+j}, {85+j}, {30+j*10}, {1.2*j}, {221+j*2}, {20+j*0.2}) " elif ( ctbPrefix=="dct"): status= random.randint(0,1) - sql += f"( {startTs+j*60000}, {1+j*0.1},{1400+j*15}, {status} ) " + sql += f"( {startTs+j*60000}, {1+j*0.1},{1400+j*15}, {status} ) " # tdLog.debug("1insert sql:%s"%sql) if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): # tdLog.debug("2insert sql:%s"%sql) tsql.execute(sql) if j < rowsPerTbl - 1: - sql = "insert into %s%d values " %(ctbPrefix,i) + sql = f"insert into {dbName}.%s%d values " %(ctbPrefix,i) else: sql = "insert into " if sql != pre_insert: # tdLog.debug("3insert sql:%s"%sql) - tsql.execute(sql) + tsql.execute(sql) tdLog.debug("insert data ............ [OK]") return def prepareData(self): dbname="db_tsbs" - stabname1="readings" - stabname2="diagnostics" - ctbnamePre1="rct" + stabname1=f"{dbname}.readings" + stabname2=f"{dbname}.diagnostics" + ctbnamePre1="rct" ctbnamePre2="dct" ctbNums=50 self.ctbNums=ctbNums @@ -107,7 +107,7 @@ class TDTestCase: # tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}',NULL ,'v2.3')") # else: # tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')") - # for j in range(ctbNums): + # for j in range(ctbNums): # for i in range(rowNUms): # tdSql.execute( # f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )" @@ -133,106 +133,106 @@ class TDTestCase: # tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query ) - def tsbsIotQuery(self,insertinto=True): - + def tsbsIotQuery(self,insertinto=True, dbname="db_tsbs"): + tdSql.execute("use db_tsbs") - + # test interval and partition - tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ") + tdSql.query(f"select avg(velocity) as mean_velocity ,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ") parRows=tdSql.queryRows - tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ") + tdSql.query(f"select avg(velocity) as mean_velocity ,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ") tdSql.checkRows(parRows) - - - # test insert into + + + # test insert into if insertinto == True : - tdSql.execute("create table testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;") - tdSql.query("insert into testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);") - - tdSql.query("insert into testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);") + tdSql.execute(f"create table {dbname}.testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;") + tdSql.query(f"insert into {dbname}.testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);") + + tdSql.query(f"insert into {dbname}.testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);") # test paitition interval fill - tdSql.query("SELECT name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0) ;") + tdSql.query(f"select name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv from {dbname}.readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0) ;") # test partition interval limit (PRcore-TD-17410) - tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings partition BY name,driver,fleet interval (10m) limit 1);") + tdSql.query(f"select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity from {dbname}.readings partition BY name,driver,fleet interval (10m) limit 1);") tdSql.checkRows(self.ctbNums) # test partition interval Pseudo time-column - tdSql.query("SELECT count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") + tdSql.query(f"select count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") # 1 high-load: - tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name desc, ts DESC;") + tdSql.query(f"select ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity from {dbname}.diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name desc, ts DESC;") - tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;") + tdSql.query(f"select ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity from {dbname}.diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;") - # 2 stationary-trucks - tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)") - tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name") + # 2 stationary-trucks + tdSql.query(f"select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity from {dbname}.readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)") + tdSql.query(f"select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity from {dbname}.readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name") # 3 long-driving-sessions - tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT _wstart as ts,name,driver,avg(velocity) as mean_velocity FROM readings where ts > '2016-01-01T00:00:34Z' AND ts <= '2016-01-01T04:00:34Z' partition BY name,driver interval(10m)) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 22 ;") + tdSql.query(f"select name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT _wstart as ts,name,driver,avg(velocity) as mean_velocity from {dbname}.readings where ts > '2016-01-01T00:00:34Z' AND ts <= '2016-01-01T04:00:34Z' partition BY name,driver interval(10m)) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 22 ;") #4 long-daily-sessions - tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT name,driver,avg(velocity) as mean_velocity FROM readings WHERE fleet ='West' AND ts > '2016-01-01T12:31:37Z' AND ts <= '2016-01-05T12:31:37Z' partition BY name,driver interval(10m) ) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 60") + tdSql.query(f"select name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT name,driver,avg(velocity) as mean_velocity from {dbname}.readings WHERE fleet ='West' AND ts > '2016-01-01T12:31:37Z' AND ts <= '2016-01-05T12:31:37Z' partition BY name,driver interval(10m) ) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 60") # 5. avg-daily-driving-duration - tdSql.query("select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;") + tdSql.query(f"select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from {dbname}.readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;") - # # 6. avg-daily-driving-session + # # 6. avg-daily-driving-session # #taosc core dumped - tdSql.query(" SELECT _wstart as ts,name,floor(avg(velocity)/5) AS mv FROM readings WHERE name is not null AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0);") - # tdSql.query("select name,diff(mv) AS difka FROM (SELECT ts,name,mv FROM (SELECT _wstart as ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0))) group BY name ;") - # tdSql.query("SELECT _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)") + tdSql.query(f"select _wstart as ts,name,floor(avg(velocity)/5) AS mv from {dbname}.readings WHERE name is not null AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0);") + # tdSql.query(f"select name,diff(mv) AS difka FROM (SELECT ts,name,mv FROM (SELECT _wstart as ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv from {dbname}.readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0))) group BY name ;") + # tdSql.query(f"select _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv from {dbname}.readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)") # 7. avg-load - tdSql.query("SELECT fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml FROM diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;") + tdSql.query(f"select fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml from {dbname}.diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;") - # 8. daily-activity - tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") + # 8. daily-activity + tdSql.query(f"select model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") - tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") + tdSql.query(f"select model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;") - tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;") + tdSql.query(f"select _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;") - tdSql.query("SELECT _wstart as ts,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;") + tdSql.query(f"select _wstart as ts,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;") # 9. breakdown-frequency # NULL ---count(NULL)=0 expect count(NULL)= 100 - tdSql.query("SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where model is null partition BY model,state_changed ") + tdSql.query(f"select model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where model is null partition BY model,state_changed ") parRows=tdSql.queryRows assert parRows != 0 , "query result is wrong, query rows %d but expect > 0 " %parRows - tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;") - sql="select model,ctc from (SELECT model,count(state_changed) as ctc FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= 1451606400000 AND ts < 1451952001000 ) WHERE ts >= 1451606400000 AND ts < 1451952001000 partition BY model interval(10m)) partition BY model) WHERE state_changed = 1 partition BY model )where model is null;" + tdSql.query(f"select model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;") + sql=f"select model,ctc from (SELECT model,count(state_changed) as ctc FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs from {dbname}.diagnostics WHERE ts >= 1451606400000 AND ts < 1451952001000 ) WHERE ts >= 1451606400000 AND ts < 1451952001000 partition BY model interval(10m)) partition BY model) WHERE state_changed = 1 partition BY model )where model is null;" # for i in range(2): # tdSql.query("%s"%sql) - # quertR1=tdSql.queryResult + # quertR1=tdSql.queryResult # for j in range(50): # tdSql.query("%s"%sql) # quertR2=tdSql.queryResult - # assert quertR1 == quertR2 , "%s != %s ,The results of multiple queries are different" %(quertR1,quertR2) + # assert quertR1 == quertR2 , "%s != %s ,The results of multiple queries are different" %(quertR1,quertR2) + - #it's already supported: # last-loc - tdSql.query("SELECT last_row(ts),latitude,longitude,name,driver FROM readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;") + tdSql.query(f"select last_row(ts),latitude,longitude,name,driver from {dbname}.readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;") #2. low-fuel - tdSql.query("SELECT last_row(ts),name,driver,fuel_state,driver FROM diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;") - + tdSql.query(f"select last_row(ts),name,driver,fuel_state,driver from {dbname}.diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;") + # 3. avg-vs-projected-fuel-consumption - tdSql.query("select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from readings where velocity > 1 group by fleet") - - def run(self): + tdSql.query(f"select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from {dbname}.readings where velocity > 1 group by fleet") + + def run(self): tdLog.printNoPrefix("==========step1:create database and table,insert data ==============") self.prepareData() self.tsbsIotQuery() diff --git a/tests/system-test/2-query/ttl_comment.py b/tests/system-test/2-query/ttl_comment.py index 33bd61b66c85a2519513b9eee10bfcdaff8e8925..c26393158cefe46fb054d7bd3e28a621cab73199 100644 --- a/tests/system-test/2-query/ttl_comment.py +++ b/tests/system-test/2-query/ttl_comment.py @@ -26,20 +26,21 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) + tdSql.init(conn.cursor(), False) def run(self): + dbname="db" tdSql.prepare() - tdSql.error("create table ttl_table1(ts timestamp, i int) ttl 1.1") - tdSql.error("create table ttl_table2(ts timestamp, i int) ttl 1e1") - tdSql.error("create table ttl_table3(ts timestamp, i int) ttl -1") + tdSql.error(f"create table {dbname}.ttl_table1(ts timestamp, i int) ttl 1.1") + tdSql.error(f"create table {dbname}.ttl_table2(ts timestamp, i int) ttl 1e1") + tdSql.error(f"create table {dbname}.ttl_table3(ts timestamp, i int) ttl -1") print("============== STEP 1 ===== test normal table") - tdSql.execute("create table normal_table1(ts timestamp, i int)") - tdSql.execute("create table normal_table2(ts timestamp, i int) comment '' ttl 3") - tdSql.execute("create table normal_table3(ts timestamp, i int) ttl 2100000000020 comment 'hello'") + tdSql.execute(f"create table {dbname}.normal_table1(ts timestamp, i int)") + tdSql.execute(f"create table {dbname}.normal_table2(ts timestamp, i int) comment '' ttl 3") + tdSql.execute(f"create table {dbname}.normal_table3(ts timestamp, i int) ttl 2100000000020 comment 'hello'") tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'") tdSql.checkData(0, 0, 'normal_table1') @@ -58,32 +59,32 @@ class TDTestCase: tdSql.checkData(0, 7, 2147483647) tdSql.checkData(0, 8, 'hello') - tdSql.execute("alter table normal_table1 comment 'nihao'") + tdSql.execute(f"alter table {dbname}.normal_table1 comment 'nihao'") tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'") tdSql.checkData(0, 0, 'normal_table1') tdSql.checkData(0, 8, 'nihao') - tdSql.execute("alter table normal_table1 comment ''") + tdSql.execute(f"alter table {dbname}.normal_table1 comment ''") tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'") tdSql.checkData(0, 0, 'normal_table1') tdSql.checkData(0, 8, '') - tdSql.execute("alter table normal_table2 comment 'fly'") + tdSql.execute(f"alter table {dbname}.normal_table2 comment 'fly'") tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table2'") tdSql.checkData(0, 0, 'normal_table2') tdSql.checkData(0, 8, 'fly') - tdSql.execute("alter table normal_table3 comment 'fly'") + tdSql.execute(f"alter table {dbname}.normal_table3 comment 'fly'") tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table3'") tdSql.checkData(0, 0, 'normal_table3') tdSql.checkData(0, 8, 'fly') - tdSql.execute("alter table normal_table1 ttl 1") + tdSql.execute(f"alter table {dbname}.normal_table1 ttl 1") tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'") tdSql.checkData(0, 0, 'normal_table1') tdSql.checkData(0, 7, 1) - tdSql.execute("alter table normal_table3 ttl 0") + tdSql.execute(f"alter table {dbname}.normal_table3 ttl 0") tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table3'") tdSql.checkData(0, 0, 'normal_table3') tdSql.checkData(0, 7, 0) @@ -91,9 +92,9 @@ class TDTestCase: print("============== STEP 2 ===== test super table") - tdSql.execute("create table super_table1(ts timestamp, i int) tags(t int)") - tdSql.execute("create table super_table2(ts timestamp, i int) tags(t int) comment ''") - tdSql.execute("create table super_table3(ts timestamp, i int) tags(t int) comment 'super'") + tdSql.execute(f"create table {dbname}.super_table1(ts timestamp, i int) tags(t int)") + tdSql.execute(f"create table {dbname}.super_table2(ts timestamp, i int) tags(t int) comment ''") + tdSql.execute(f"create table {dbname}.super_table3(ts timestamp, i int) tags(t int) comment 'super'") tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table1'") tdSql.checkData(0, 0, 'super_table1') @@ -110,32 +111,32 @@ class TDTestCase: tdSql.checkData(0, 6, 'super') - tdSql.execute("alter table super_table1 comment 'nihao'") + tdSql.execute(f"alter table {dbname}.super_table1 comment 'nihao'") tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table1'") tdSql.checkData(0, 0, 'super_table1') tdSql.checkData(0, 6, 'nihao') - tdSql.execute("alter table super_table1 comment ''") + tdSql.execute(f"alter table {dbname}.super_table1 comment ''") tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table1'") tdSql.checkData(0, 0, 'super_table1') tdSql.checkData(0, 6, '') - tdSql.execute("alter table super_table2 comment 'fly'") + tdSql.execute(f"alter table {dbname}.super_table2 comment 'fly'") tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table2'") tdSql.checkData(0, 0, 'super_table2') tdSql.checkData(0, 6, 'fly') - tdSql.execute("alter table super_table3 comment 'tdengine'") + tdSql.execute(f"alter table {dbname}.super_table3 comment 'tdengine'") tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table3'") tdSql.checkData(0, 0, 'super_table3') tdSql.checkData(0, 6, 'tdengine') print("============== STEP 3 ===== test child table") - tdSql.execute("create table child_table1 using super_table1 tags(1) ttl 10") - tdSql.execute("create table child_table2 using super_table1 tags(1) comment ''") - tdSql.execute("create table child_table3 using super_table1 tags(1) comment 'child'") - tdSql.execute("insert into child_table4 using super_table1 tags(1) values(now, 1)") + tdSql.execute(f"create table {dbname}.child_table1 using {dbname}.super_table1 tags(1) ttl 10") + tdSql.execute(f"create table {dbname}.child_table2 using {dbname}.super_table1 tags(1) comment ''") + tdSql.execute(f"create table {dbname}.child_table3 using {dbname}.super_table1 tags(1) comment 'child'") + tdSql.execute(f"insert into {dbname}.child_table4 using {dbname}.super_table1 tags(1) values(now, 1)") tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table1'") @@ -160,38 +161,38 @@ class TDTestCase: tdSql.checkData(0, 8, None) - tdSql.execute("alter table child_table1 comment 'nihao'") + tdSql.execute(f"alter table {dbname}.child_table1 comment 'nihao'") tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table1'") tdSql.checkData(0, 0, 'child_table1') tdSql.checkData(0, 8, 'nihao') - tdSql.execute("alter table child_table1 comment ''") + tdSql.execute(f"alter table {dbname}.child_table1 comment ''") tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table1'") tdSql.checkData(0, 0, 'child_table1') tdSql.checkData(0, 8, '') - tdSql.execute("alter table child_table2 comment 'fly'") + tdSql.execute(f"alter table {dbname}.child_table2 comment 'fly'") tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table2'") tdSql.checkData(0, 0, 'child_table2') tdSql.checkData(0, 8, 'fly') - tdSql.execute("alter table child_table3 comment 'tdengine'") + tdSql.execute(f"alter table {dbname}.child_table3 comment 'tdengine'") tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table3'") tdSql.checkData(0, 0, 'child_table3') tdSql.checkData(0, 8, 'tdengine') - tdSql.execute("alter table child_table4 comment 'tdengine'") + tdSql.execute(f"alter table {dbname}.child_table4 comment 'tdengine'") tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table4'") tdSql.checkData(0, 0, 'child_table4') tdSql.checkData(0, 8, 'tdengine') - tdSql.execute("alter table child_table4 ttl 9") + tdSql.execute(f"alter table {dbname}.child_table4 ttl 9") tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table4'") tdSql.checkData(0, 0, 'child_table4') tdSql.checkData(0, 7, 9) - tdSql.execute("alter table child_table3 ttl 9") + tdSql.execute(f"alter table {dbname}.child_table3 ttl 9") tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table3'") tdSql.checkData(0, 0, 'child_table3') tdSql.checkData(0, 7, 9) @@ -203,4 +204,3 @@ class TDTestCase: tdCases.addWindows(__file__, TDTestCase()) tdCases.addLinux(__file__, TDTestCase()) - diff --git a/tests/system-test/2-query/twa.py b/tests/system-test/2-query/twa.py index 8281527bd46be8f1b14d6ee2098a2888c20a737a..62940477cf701d69e8c8e7568ae4b56d68518d81 100644 --- a/tests/system-test/2-query/twa.py +++ b/tests/system-test/2-query/twa.py @@ -7,10 +7,7 @@ import platform import math class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143, - "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } + updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) @@ -21,46 +18,45 @@ class TDTestCase: self.row_nums = 100 self.time_step = 1000 - def prepare_datas_of_distribute(self): + def prepare_datas_of_distribute(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5") - tdSql.execute(" use testdb ") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) ''' ) for i in range(self.tb_nums): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') ts = self.ts for j in range(self.row_nums): ts+=j*self.time_step tdSql.execute( - f"insert into ct{i+1} values({ts}, 1, 11111, 111, 1, 1.11, 11.11, 2, 'binary{j}', 'nchar{j}', now()+{1*j}a )" + f"insert into {dbname}.ct{i+1} values({ts}, 1, 11111, 111, 1, 1.11, 11.11, 2, 'binary{j}', 'nchar{j}', now()+{1*j}a )" ) - tdSql.execute("insert into ct1 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct1 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct1 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdLog.info(" prepare data for distributed_aggregate done! ") - def twa_support_types(self): - tdSql.query("desc stb1 ") + def twa_support_types(self, dbname="testdb"): + tdSql.query(f"desc {dbname}.stb1 ") schema_list = tdSql.queryResult for col_type in schema_list: if col_type[1] in ["TINYINT" ,"SMALLINT","BIGINT" ,"INT","FLOAT","DOUBLE"]: - tdSql.query(f" select twa({col_type[0]}) from stb1 partition by tbname ") + tdSql.query(f"select twa({col_type[0]}) from {dbname}.stb1 partition by tbname ") else: - tdSql.error(f" select twa({col_type[0]}) from stb1 partition by tbname ") + tdSql.error(f"select twa({col_type[0]}) from {dbname}.stb1 partition by tbname ") - def check_distribute_datas(self): + def check_distribute_datas(self, dbname="testdb"): # get vgroup_ids of all - tdSql.query("show vgroups ") + tdSql.query(f"show {dbname}.vgroups ") vgroups = tdSql.queryResult vnode_tables={} @@ -69,7 +65,7 @@ class TDTestCase: vnode_tables[vgroup_id[0]]=[] # check sub_table of per vnode ,make sure sub_table has been distributed - tdSql.query(f"select * from information_schema.ins_tables where db_name = 'testdb' and table_name like 'ct%'") + tdSql.query(f"select * from information_schema.ins_tables where db_name = '{dbname}' and table_name like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: @@ -83,28 +79,28 @@ class TDTestCase: if count < 2: tdLog.exit(" the datas of all not satisfy sub_table has been distributed ") - def distribute_twa_query(self): + def distribute_twa_query(self, dbname="testdb"): # basic filter - tdSql.query(" select twa(c1) from ct1 ") + tdSql.query(f"select twa(c1) from {dbname}.ct1 ") tdSql.checkData(0,0,1.000000000) - tdSql.query(" select twa(c1) from stb1 partition by tbname ") + tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(self.tb_nums) tdSql.checkData(0,0,1.000000000) - tdSql.query(" select twa(c2) from stb1 group by tbname ") + tdSql.query(f"select twa(c2) from {dbname}.stb1 group by tbname ") tdSql.checkRows(self.tb_nums) tdSql.checkData(0,0,11111.000000000) - tdSql.query("select twa(c1+c2) from stb1 partition by tbname ") + tdSql.query(f"select twa(c1+c2) from {dbname}.stb1 partition by tbname ") tdSql.checkData(0,0,11112.000000000) - tdSql.query("select twa(c1) from stb1 partition by t1") + tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by t1") tdSql.checkRows(self.tb_nums) tdSql.checkData(0,0,1.000000000) # union all - tdSql.query(" select twa(c1) from stb1 partition by tbname union all select twa(c1) from stb1 partition by tbname ") + tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by tbname union all select twa(c1) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(40) tdSql.checkData(0,0,1.000000000) @@ -112,26 +108,23 @@ class TDTestCase: tdSql.execute(" create database if not exists db ") tdSql.execute(" use db ") - tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") - tdSql.execute(" create table tb1 using st tags(1) ") - tdSql.execute(" create table tb2 using st tags(2) ") + tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") + tdSql.execute(" create table db.tb1 using db.st tags(1) ") + tdSql.execute(" create table db.tb2 using db.st tags(2) ") for i in range(10): ts = i*10 + self.ts - tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)") - tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)") - tdSql.query(" select twa(tb1.c1), twa(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts ") + tdSql.query(f"select twa(tb1.c1), twa(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts ") tdSql.checkRows(1) tdSql.checkData(0,0,4.500000000) tdSql.checkData(0,1,4.500000000) - # group by - tdSql.execute(" use testdb ") - # mixup with other functions - tdSql.query(" select twa(c1),twa(c2),max(c1),elapsed(ts) from stb1 ") + tdSql.query(f"select twa(c1),twa(c2),max(c1),elapsed(ts) from {dbname}.stb1 ") tdSql.checkData(0,0,1.000000000) tdSql.checkData(0,1,11111.000000000) tdSql.checkData(0,2,1) diff --git a/tests/system-test/2-query/union.py b/tests/system-test/2-query/union.py index 88767ab888c9bfe11c329eecd41f78442436cafb..4040bb71cbb92849dd63d11627c93a2954a4a0d1 100644 --- a/tests/system-test/2-query/union.py +++ b/tests/system-test/2-query/union.py @@ -58,10 +58,10 @@ class TDTestCase: def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False): table_reference = tb_list[0] - join_condition = table_reference + join_condition = f'{table_reference} {table_reference.split(".")[-1]}' join = "inner join" if INNER else "join" for i in range(len(tb_list[1:])): - join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}" + join_condition += f" {join} {tb_list[i+1]} {tb_list[i+1].split('.')[-1]} on {table_reference.split('.')[-1]}.{filter}={tb_list[i+1].split('.')[-1]}.{filter}" return join_condition @@ -76,7 +76,6 @@ class TDTestCase: elif query_conditon.startswith("min"): query_conditon = query_conditon[4:-1] - if query_conditon: return f" where {query_conditon} is not null" if col in NUM_COL: @@ -108,10 +107,10 @@ class TDTestCase: return f"select {select_clause} from {from_clause} {where_condition} {group_condition}" @property - def __join_tblist(self): + def __join_tblist(self, dbname="db"): return [ - ["ct1", "t1"], - ["ct4", "t1"], + [f"{dbname}.ct1", f"{dbname}.t1"], + [f"{dbname}.ct4", f"{dbname}.t1"], # ["ct1", "ct2", "ct4"], # ["ct1", "ct2", "t1"], # ["ct1", "ct4", "t1"], @@ -120,10 +119,10 @@ class TDTestCase: ] @property - def __tb_liast(self): + def __tb_list(self, dbname="db"): return [ - "ct1", - "ct4", + f"{dbname}.ct1", + f"{dbname}.ct4", ] def sql_list(self): @@ -131,7 +130,8 @@ class TDTestCase: __join_tblist = self.__join_tblist for join_tblist in __join_tblist: for join_tb in join_tblist: - select_claus_list = self.__query_condition(join_tb) + join_tb_name = join_tb.split(".")[-1] + select_claus_list = self.__query_condition(join_tb_name) for select_claus in select_claus_list: group_claus = self.__group_condition( col=select_claus) where_claus = self.__where_condition(query_conditon=select_claus) @@ -141,9 +141,10 @@ class TDTestCase: self.__single_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus), ) ) - __no_join_tblist = self.__tb_liast + __no_join_tblist = self.__tb_list for tb in __no_join_tblist: - select_claus_list = self.__query_condition(tb) + tb_name = join_tb.split(".")[-1] + select_claus_list = self.__query_condition(tb_name) for select_claus in select_claus_list: group_claus = self.__group_condition(col=select_claus) where_claus = self.__where_condition(query_conditon=select_claus) @@ -230,31 +231,29 @@ class TDTestCase: else: tdSql.error(f"{sqls[i]} union {sqls[j+i]}") - def __test_error(self): + def __test_error(self, dbname="db"): - tdSql.error( "show tables union show tables" ) - tdSql.error( "create table errtb1 union all create table errtb2" ) - tdSql.error( "drop table ct1 union all drop table ct3" ) - tdSql.error( "select c1 from ct1 union all drop table ct3" ) - tdSql.error( "select c1 from ct1 union all '' " ) - tdSql.error( " '' union all select c1 from ct1 " ) - # tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ") + tdSql.error( f"show {dbname}.tables union show {dbname}.tables" ) + tdSql.error( f"create table {dbname}.errtb1 union all create table {dbname}.errtb2" ) + tdSql.error( f"drop table {dbname}.ct1 union all drop table {dbname}.ct3" ) + tdSql.error( f"select c1 from {dbname}.ct1 union all drop table {dbname}.ct3" ) + tdSql.error( f"select c1 from {dbname}.ct1 union all '' " ) + tdSql.error( f" '' union all select c1 from{dbname}. ct1 " ) def all_test(self): self.__test_error() self.union_check() - - def __create_tb(self): + def __create_tb(self, dbname="db"): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp - ) tags (t1 int) + ) tags (tag1 int) ''' - create_ntb_sql = f'''create table t1( + create_ntb_sql = f'''create table {dbname}.t1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -264,30 +263,29 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') - { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2} + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') - def __insert_data(self, rows): + def __insert_data(self, rows, dbname="db"): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into ct1 values + f'''insert into {dbname}.ct1 values ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -303,7 +301,7 @@ class TDTestCase: ) tdSql.execute( - f'''insert into ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -319,13 +317,13 @@ class TDTestCase: ) for i in range(rows): - insert_data = f'''insert into t1 values + insert_data = f'''insert into {dbname}.t1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) @@ -341,7 +339,6 @@ class TDTestCase: ''' ) - def run(self): tdSql.prepare() @@ -355,8 +352,7 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) + tdSql.execute("flush database db") tdSql.execute("use db") diff --git a/tests/system-test/2-query/unique.py b/tests/system-test/2-query/unique.py index ccf7e287e27d7768acedc17b55969d1fab6d30cd..ec77cbbcdc9d83d0a63b54fbe377c14d8645ce52 100644 --- a/tests/system-test/2-query/unique.py +++ b/tests/system-test/2-query/unique.py @@ -11,49 +11,46 @@ from util.sql import * from util.cases import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - def prepare_datas(self): + def prepare_datas(self, dbname="db"): tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -69,84 +66,84 @@ class TDTestCase: ''' ) - def test_errors(self): + def test_errors(self, dbname="db"): error_sql_lists = [ - "select unique from t1", - "select unique(123--123)==1 from t1", - "select unique(123,123) from t1", - "select unique(c1,ts) from t1", - "select unique(c1,c1,ts) from t1", - "select unique(c1) as 'd1' from t1", - "select unique(c1 ,c2 ) from t1", - "select unique(c1 ,NULL) from t1", - "select unique(,) from t1;", - "select unique(floor(c1) ab from t1)", - "select unique(c1) as int from t1", - "select unique('c1') from t1", - "select unique(NULL) from t1", - "select unique('') from t1", - "select unique(c%) from t1", - "select unique(t1) from t1", - "select unique(True) from t1", - "select unique(c1) , count(c1) from t1", - "select unique(c1) , avg(c1) from t1", - "select unique(c1) , min(c1) from t1", - "select unique(c1) , spread(c1) from t1", - "select unique(c1) , diff(c1) from t1", - #"select unique(c1) , abs(c1) from t1", # support - #"select unique(c1) , c1 from t1", - "select unique from stb1 partition by tbname", - "select unique(123--123)==1 from stb1 partition by tbname", - "select unique(123) from stb1 partition by tbname", - "select unique(c1,ts) from stb1 partition by tbname", - "select unique(c1,c1,ts) from stb1 partition by tbname", - "select unique(c1) as 'd1' from stb1 partition by tbname", - "select unique(c1 ,c2 ) from stb1 partition by tbname", - "select unique(c1 ,NULL) from stb1 partition by tbname", - "select unique(,) from stb1 partition by tbname;", - #"select unique(floor(c1) ab from stb1 partition by tbname)", # support - #"select unique(c1) as int from stb1 partition by tbname", - "select unique('c1') from stb1 partition by tbname", - "select unique(NULL) from stb1 partition by tbname", - "select unique('') from stb1 partition by tbname", - "select unique(c%) from stb1 partition by tbname", - #"select unique(t1) from stb1 partition by tbname", # support - "select unique(True) from stb1 partition by tbname", - "select unique(c1) , count(c1) from stb1 partition by tbname", - "select unique(c1) , avg(c1) from stb1 partition by tbname", - "select unique(c1) , min(c1) from stb1 partition by tbname", - "select unique(c1) , spread(c1) from stb1 partition by tbname", - "select unique(c1) , diff(c1) from stb1 partition by tbname", - #"select unique(c1) , abs(c1) from stb1 partition by tbname", # support - #"select unique(c1) , c1 from stb1 partition by tbname" # support + f"select unique from {dbname}.t1", + f"select unique(123--123)==1 from {dbname}.t1", + f"select unique(123,123) from {dbname}.t1", + f"select unique(c1,ts) from {dbname}.t1", + f"select unique(c1,c1,ts) from {dbname}.t1", + f"select unique(c1) as 'd1' from {dbname}.t1", + f"select unique(c1 ,c2 ) from {dbname}.t1", + f"select unique(c1 ,NULL) from {dbname}.t1", + f"select unique(,) from {dbname}.t1;", + f"select unique(floor(c1) ab from {dbname}.t1)", + f"select unique(c1) as int from {dbname}.t1", + f"select unique('c1') from {dbname}.t1", + f"select unique(NULL) from {dbname}.t1", + f"select unique('') from {dbname}.t1", + f"select unique(c%) from {dbname}.t1", + f"select unique(t1) from {dbname}.t1", + f"select unique(True) from {dbname}.t1", + f"select unique(c1) , count(c1) from {dbname}.t1", + f"select unique(c1) , avg(c1) from {dbname}.t1", + f"select unique(c1) , min(c1) from {dbname}.t1", + f"select unique(c1) , spread(c1) from {dbname}.t1", + f"select unique(c1) , diff(c1) from {dbname}.t1", + #f"select unique(c1) , abs(c1) from {dbname}.t1", # support + #f"select unique(c1) , c1 from {dbname}.t1", + f"select unique from {dbname}.stb1 partition by tbname", + f"select unique(123--123)==1 from {dbname}.stb1 partition by tbname", + f"select unique(123) from {dbname}.stb1 partition by tbname", + f"select unique(c1,ts) from {dbname}.stb1 partition by tbname", + f"select unique(c1,c1,ts) from {dbname}.stb1 partition by tbname", + f"select unique(c1) as 'd1' from {dbname}.stb1 partition by tbname", + f"select unique(c1 ,c2 ) from {dbname}.stb1 partition by tbname", + f"select unique(c1 ,NULL) from {dbname}.stb1 partition by tbname", + f"select unique(,) from {dbname}.stb1 partition by tbname;", + #f"select unique(floor(c1) ab from {dbname}.stb1 partition by tbname)", # support + #f"select unique(c1) as int from {dbname}.stb1 partition by tbname", + f"select unique('c1') from {dbname}.stb1 partition by tbname", + f"select unique(NULL) from {dbname}.stb1 partition by tbname", + f"select unique('') from {dbname}.stb1 partition by tbname", + f"select unique(c%) from {dbname}.stb1 partition by tbname", + #f"select unique(t1) from {dbname}.stb1 partition by tbname", # support + f"select unique(True) from {dbname}.stb1 partition by tbname", + f"select unique(c1) , count(c1) from {dbname}.stb1 partition by tbname", + f"select unique(c1) , avg(c1) from {dbname}.stb1 partition by tbname", + f"select unique(c1) , min(c1) from {dbname}.stb1 partition by tbname", + f"select unique(c1) , spread(c1) from {dbname}.stb1 partition by tbname", + f"select unique(c1) , diff(c1) from {dbname}.stb1 partition by tbname", + #f"select unique(c1) , abs(c1) from {dbname}.stb1 partition by tbname", # support + #f"select unique(c1) , c1 from {dbname}.stb1 partition by tbname" # support ] for error_sql in error_sql_lists: tdSql.error(error_sql) pass - def support_types(self): + def support_types(self, dbname="db"): other_no_value_types = [ - "select unique(ts) from t1" , - "select unique(c7) from t1", - "select unique(c8) from t1", - "select unique(c9) from t1", - "select unique(ts) from ct1" , - "select unique(c7) from ct1", - "select unique(c8) from ct1", - "select unique(c9) from ct1", - "select unique(ts) from ct3" , - "select unique(c7) from ct3", - "select unique(c8) from ct3", - "select unique(c9) from ct3", - "select unique(ts) from ct4" , - "select unique(c7) from ct4", - "select unique(c8) from ct4", - "select unique(c9) from ct4", - "select unique(ts) from stb1 partition by tbname" , - "select unique(c7) from stb1 partition by tbname", - "select unique(c8) from stb1 partition by tbname", - "select unique(c9) from stb1 partition by tbname" + f"select unique(ts) from {dbname}.t1" , + f"select unique(c7) from {dbname}.t1", + f"select unique(c8) from {dbname}.t1", + f"select unique(c9) from {dbname}.t1", + f"select unique(ts) from {dbname}.ct1" , + f"select unique(c7) from {dbname}.ct1", + f"select unique(c8) from {dbname}.ct1", + f"select unique(c9) from {dbname}.ct1", + f"select unique(ts) from {dbname}.ct3" , + f"select unique(c7) from {dbname}.ct3", + f"select unique(c8) from {dbname}.ct3", + f"select unique(c9) from {dbname}.ct3", + f"select unique(ts) from {dbname}.ct4" , + f"select unique(c7) from {dbname}.ct4", + f"select unique(c8) from {dbname}.ct4", + f"select unique(c9) from {dbname}.ct4", + f"select unique(ts) from {dbname}.stb1 partition by tbname" , + f"select unique(c7) from {dbname}.stb1 partition by tbname", + f"select unique(c8) from {dbname}.stb1 partition by tbname", + f"select unique(c9) from {dbname}.stb1 partition by tbname" ] for type_sql in other_no_value_types: @@ -154,43 +151,43 @@ class TDTestCase: tdLog.info("support type ok , sql is : %s"%type_sql) type_sql_lists = [ - "select unique(c1) from t1", - "select unique(c2) from t1", - "select unique(c3) from t1", - "select unique(c4) from t1", - "select unique(c5) from t1", - "select unique(c6) from t1", - - "select unique(c1) from ct1", - "select unique(c2) from ct1", - "select unique(c3) from ct1", - "select unique(c4) from ct1", - "select unique(c5) from ct1", - "select unique(c6) from ct1", - - "select unique(c1) from ct3", - "select unique(c2) from ct3", - "select unique(c3) from ct3", - "select unique(c4) from ct3", - "select unique(c5) from ct3", - "select unique(c6) from ct3", - - "select unique(c1) from stb1 partition by tbname", - "select unique(c2) from stb1 partition by tbname", - "select unique(c3) from stb1 partition by tbname", - "select unique(c4) from stb1 partition by tbname", - "select unique(c5) from stb1 partition by tbname", - "select unique(c6) from stb1 partition by tbname", - - "select unique(c6) as alisb from stb1 partition by tbname", - "select unique(c6) alisb from stb1 partition by tbname", + f"select unique(c1) from {dbname}.t1", + f"select unique(c2) from {dbname}.t1", + f"select unique(c3) from {dbname}.t1", + f"select unique(c4) from {dbname}.t1", + f"select unique(c5) from {dbname}.t1", + f"select unique(c6) from {dbname}.t1", + + f"select unique(c1) from {dbname}.ct1", + f"select unique(c2) from {dbname}.ct1", + f"select unique(c3) from {dbname}.ct1", + f"select unique(c4) from {dbname}.ct1", + f"select unique(c5) from {dbname}.ct1", + f"select unique(c6) from {dbname}.ct1", + + f"select unique(c1) from {dbname}.ct3", + f"select unique(c2) from {dbname}.ct3", + f"select unique(c3) from {dbname}.ct3", + f"select unique(c4) from {dbname}.ct3", + f"select unique(c5) from {dbname}.ct3", + f"select unique(c6) from {dbname}.ct3", + + f"select unique(c1) from {dbname}.stb1 partition by tbname", + f"select unique(c2) from {dbname}.stb1 partition by tbname", + f"select unique(c3) from {dbname}.stb1 partition by tbname", + f"select unique(c4) from {dbname}.stb1 partition by tbname", + f"select unique(c5) from {dbname}.stb1 partition by tbname", + f"select unique(c6) from {dbname}.stb1 partition by tbname", + + f"select unique(c6) as alisb from {dbname}.stb1 partition by tbname", + f"select unique(c6) alisb from {dbname}.stb1 partition by tbname", ] for type_sql in type_sql_lists: tdSql.query(type_sql) def check_unique_table(self , unique_sql): - # unique_sql = "select unique(c1) from ct1" + # unique_sql = f"select unique(c1) from {dbname}.ct1" origin_sql = unique_sql.replace("unique(","").replace(")","") tdSql.query(unique_sql) unique_result = tdSql.queryResult @@ -219,83 +216,83 @@ class TDTestCase: else: tdLog.exit(" unique query check fail , unique sql is: %s " %unique_sql) - def basic_unique_function(self): + def basic_unique_function(self, dbname="db"): # basic query - tdSql.query("select c1 from ct3") + tdSql.query(f"select c1 from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select c1 from t1") + tdSql.query(f"select c1 from {dbname}.t1") tdSql.checkRows(12) - tdSql.query("select c1 from stb1") + tdSql.query(f"select c1 from {dbname}.stb1") tdSql.checkRows(25) # used for empty table , ct3 is empty - tdSql.query("select unique(c1) from ct3") + tdSql.query(f"select unique(c1) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select unique(c2) from ct3") + tdSql.query(f"select unique(c2) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select unique(c3) from ct3") + tdSql.query(f"select unique(c3) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select unique(c4) from ct3") + tdSql.query(f"select unique(c4) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select unique(c5) from ct3") + tdSql.query(f"select unique(c5) from {dbname}.ct3") tdSql.checkRows(0) - tdSql.query("select unique(c6) from ct3") + tdSql.query(f"select unique(c6) from {dbname}.ct3") # will support _rowts mix with - # tdSql.query("select unique(c6),_rowts from ct3") + # tdSql.query(f"select unique(c6),_rowts from {dbname}.ct3") # auto check for t1 table # used for regular table - tdSql.query("select unique(c1) from t1") + tdSql.query(f"select unique(c1) from {dbname}.t1") - tdSql.query("desc t1") + tdSql.query(f"desc {dbname}.t1") col_lists_rows = tdSql.queryResult col_lists = [] for col_name in col_lists_rows: col_lists.append(col_name[0]) for col in col_lists: - self.check_unique_table(f"select unique({col}) from t1") + self.check_unique_table(f"select unique({col}) from {dbname}.t1") # unique with super tags - tdSql.query("select unique(c1) from ct1") + tdSql.query(f"select unique(c1) from {dbname}.ct1") tdSql.checkRows(10) - tdSql.query("select unique(c1) from ct4") + tdSql.query(f"select unique(c1) from {dbname}.ct4") tdSql.checkRows(10) - #tdSql.error("select unique(c1),tbname from ct1") #support - #tdSql.error("select unique(c1),t1 from ct1") #support + #tdSql.error(f"select unique(c1),tbname from {dbname}.ct1") #support + #tdSql.error(f"select unique(c1),t1 from {dbname}.ct1") #support # unique with common col - #tdSql.error("select unique(c1) ,ts from ct1") - #tdSql.error("select unique(c1) ,c1 from ct1") + #tdSql.error(f"select unique(c1) ,ts from {dbname}.ct1") + #tdSql.error(f"select unique(c1) ,c1 from {dbname}.ct1") # unique with scalar function - #tdSql.error("select unique(c1) ,abs(c1) from ct1") - tdSql.error("select unique(c1) , unique(c2) from ct1") - #tdSql.error("select unique(c1) , abs(c2)+2 from ct1") + #tdSql.error(f"select unique(c1) ,abs(c1) from {dbname}.ct1") + tdSql.error(f"select unique(c1) , unique(c2) from {dbname}.ct1") + #tdSql.error(f"select unique(c1) , abs(c2)+2 from {dbname}.ct1") # unique with aggregate function - tdSql.error("select unique(c1) ,sum(c1) from ct1") - tdSql.error("select unique(c1) ,max(c1) from ct1") - tdSql.error("select unique(c1) ,csum(c1) from ct1") - tdSql.error("select unique(c1) ,count(c1) from ct1") + tdSql.error(f"select unique(c1) ,sum(c1) from {dbname}.ct1") + tdSql.error(f"select unique(c1) ,max(c1) from {dbname}.ct1") + tdSql.error(f"select unique(c1) ,csum(c1) from {dbname}.ct1") + tdSql.error(f"select unique(c1) ,count(c1) from {dbname}.ct1") # unique with filter where - tdSql.query("select unique(c1) from ct4 where c1 is null") + tdSql.query(f"select unique(c1) from {dbname}.ct4 where c1 is null") tdSql.checkData(0, 0, None) - tdSql.query("select unique(c1) from ct4 where c1 >2 order by 1") + tdSql.query(f"select unique(c1) from {dbname}.ct4 where c1 >2 order by 1") tdSql.checkData(0, 0, 3) tdSql.checkData(1, 0, 4) tdSql.checkData(2, 0, 5) tdSql.checkData(5, 0, 8) - tdSql.query("select unique(c1) from ct4 where c2 between 0 and 99999 order by 1 desc") + tdSql.query(f"select unique(c1) from {dbname}.ct4 where c2 between 0 and 99999 order by 1 desc") tdSql.checkData(0, 0, 8) tdSql.checkData(1, 0, 7) tdSql.checkData(2, 0, 6) @@ -307,43 +304,43 @@ class TDTestCase: tdSql.checkData(8, 0, 0) # unique with union all - tdSql.query("select unique(c1) from ct4 union all select c1 from ct1") + tdSql.query(f"select unique(c1) from {dbname}.ct4 union all select c1 from {dbname}.ct1") tdSql.checkRows(23) - tdSql.query("select unique(c1) from ct4 union all select distinct(c1) from ct4") + tdSql.query(f"select unique(c1) from {dbname}.ct4 union all select distinct(c1) from {dbname}.ct4") tdSql.checkRows(20) - tdSql.query("select unique(c2) from ct4 union all select abs(c2)/2 from ct4") + tdSql.query(f"select unique(c2) from {dbname}.ct4 union all select abs(c2)/2 from {dbname}.ct4") tdSql.checkRows(22) # unique with join # prepare join datas with same ts tdSql.execute(" use db ") - tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)") - tdSql.execute(" create table tb1 using st1 tags(1)") - tdSql.execute(" create table tb2 using st1 tags(2)") + tdSql.execute(" create stable db.st1 (ts timestamp , num int) tags(ind int)") + tdSql.execute(" create table db.tb1 using db.st1 tags(1)") + tdSql.execute(" create table db.tb2 using db.st1 tags(2)") - tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)") - tdSql.execute(" create table ttb1 using st2 tags(1)") - tdSql.execute(" create table ttb2 using st2 tags(2)") + tdSql.execute(" create stable db.st2 (ts timestamp , num int) tags(ind int)") + tdSql.execute(" create table db.ttb1 using db.st2 tags(1)") + tdSql.execute(" create table db.ttb2 using db.st2 tags(2)") start_ts = 1622369635000 # 2021-05-30 18:13:55 for i in range(10): ts_value = start_ts+i*1000 - tdSql.execute(f" insert into tb1 values({ts_value} , {i})") - tdSql.execute(f" insert into tb2 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.tb1 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.tb2 values({ts_value} , {i})") - tdSql.execute(f" insert into ttb1 values({ts_value} , {i})") - tdSql.execute(f" insert into ttb2 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.ttb1 values({ts_value} , {i})") + tdSql.execute(f" insert into {dbname}.ttb2 values({ts_value} , {i})") - tdSql.query("select unique(tb2.num) from tb1, tb2 where tb1.ts=tb2.ts order by 1") + tdSql.query(f"select unique(tb2.num) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts order by 1") tdSql.checkRows(10) tdSql.checkData(0,0,0) tdSql.checkData(1,0,1) tdSql.checkData(2,0,2) tdSql.checkData(9,0,9) - tdSql.query("select unique(tb2.num) from tb1, tb2 where tb1.ts=tb2.ts union all select unique(tb1.num) from tb1, tb2 where tb1.ts=tb2.ts order by 1") + tdSql.query(f"select unique(tb2.num) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts union all select unique(tb1.num) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts order by 1") tdSql.checkRows(20) tdSql.checkData(0,0,0) tdSql.checkData(2,0,1) @@ -351,23 +348,23 @@ class TDTestCase: tdSql.checkData(18,0,9) # nest query - # tdSql.query("select unique(c1) from (select c1 from ct1)") - tdSql.query("select c1 from (select unique(c1) c1 from ct4) order by 1 desc nulls first") + # tdSql.query(f"select unique(c1) from (select c1 from {dbname}.ct1)") + tdSql.query(f"select c1 from (select unique(c1) c1 from {dbname}.ct4) order by 1 desc nulls first") tdSql.checkRows(10) tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, 8) tdSql.checkData(9, 0, 0) - tdSql.query("select sum(c1) from (select unique(c1) c1 from ct1)") + tdSql.query(f"select sum(c1) from (select unique(c1) c1 from {dbname}.ct1)") tdSql.checkRows(1) tdSql.checkData(0, 0, 45) - tdSql.query("select sum(c1) from (select distinct(c1) c1 from ct1) union all select sum(c1) from (select unique(c1) c1 from ct1)") + tdSql.query(f"select sum(c1) from (select distinct(c1) c1 from {dbname}.ct1) union all select sum(c1) from (select unique(c1) c1 from {dbname}.ct1)") tdSql.checkRows(2) tdSql.checkData(0, 0, 45) tdSql.checkData(1, 0, 45) - tdSql.query("select 1-abs(c1) from (select unique(c1) c1 from ct4) order by 1 nulls first") + tdSql.query(f"select 1-abs(c1) from (select unique(c1) c1 from {dbname}.ct4) order by 1 nulls first") tdSql.checkRows(10) tdSql.checkData(0, 0, None) tdSql.checkData(1, 0, -7.000000000) @@ -375,104 +372,103 @@ class TDTestCase: # bug for stable #partition by tbname - # tdSql.query(" select unique(c1) from stb1 partition by tbname ") + # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ") # tdSql.checkRows(21) - # tdSql.query(" select unique(c1) from stb1 partition by tbname ") + # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ") # tdSql.checkRows(21) # group by - tdSql.error("select unique(c1) from ct1 group by c1") - tdSql.error("select unique(c1) from ct1 group by tbname") + tdSql.error(f"select unique(c1) from {dbname}.ct1 group by c1") + tdSql.error(f"select unique(c1) from {dbname}.ct1 group by tbname") # super table # super table - tdSql.error("select tbname , tail(c1,2) from stb1 group by tbname") - tdSql.query("select tail(c1,2) from stb1 partition by tbname") + tdSql.error(f"select tbname , tail(c1,2) from {dbname}.stb1 group by tbname") + tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname") tdSql.checkRows(4) # bug need fix - # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname") + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname") # tdSql.checkRows(4) - # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname order by tbname") + # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname order by tbname") # tdSql.checkRows(4) - # tdSql.query(" select tbname , count(c1) from stb1 partition by tbname order by tbname ") + # tdSql.query(f"select tbname , count(c1) from {dbname}.stb1 partition by tbname order by tbname ") # tdSql.checkRows(2) - # tdSql.query(" select tbname , max(c1) ,c1 from stb1 partition by tbname order by tbname ") + # tdSql.query(f"select tbname , max(c1) ,c1 from {dbname}.stb1 partition by tbname order by tbname ") # tdSql.checkRows(2) - # tdSql.query(" select tbname ,first(c1) from stb1 partition by tbname order by tbname ") + # tdSql.query(f"select tbname ,first(c1) from {dbname}.stb1 partition by tbname order by tbname ") # tdSql.checkRows(2) - tdSql.query("select tail(c1,2) from stb1 partition by tbname") + tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname") tdSql.checkRows(4) # # bug need fix - # tdSql.query(" select tbname , unique(c1) from stb1 where t1 = 0 partition by tbname ") + # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where t1 = 0 partition by tbname ") # tdSql.checkRows(2) - # tdSql.query(" select tbname , unique(c1) from stb1 where t1 = 0 partition by tbname order by tbname ") + # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where t1 = 0 partition by tbname order by tbname ") # tdSql.checkRows(2) - # tdSql.query(" select tbname , unique(c1) from stb1 where c1 = 0 partition by tbname order by tbname ") + # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where c1 = 0 partition by tbname order by tbname ") # tdSql.checkRows(3) - # tdSql.query(" select tbname , unique(c1) from stb1 where c1 = 0 partition by tbname ") + # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where c1 = 0 partition by tbname ") # tdSql.checkRows(3) - tdSql.query(" select unique(t1) from stb1 ") + tdSql.query(f"select unique(t1) from {dbname}.stb1 ") tdSql.checkRows(2) - tdSql.query(" select unique(t1+c1) from stb1 ") + tdSql.query(f"select unique(t1+c1) from {dbname}.stb1 ") tdSql.checkRows(13) - tdSql.query(" select unique(t1+c1) from stb1 partition by tbname ") + tdSql.query(f"select unique(t1+c1) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(20) - tdSql.query(" select unique(t1) from stb1 partition by tbname ") + tdSql.query(f"select unique(t1) from {dbname}.stb1 partition by tbname ") tdSql.checkRows(2) # nest query - tdSql.query(" select unique(c1) from (select _rowts , t1 ,c1 , tbname from stb1 ) ") + tdSql.query(f"select unique(c1) from (select _rowts , t1 ,c1 , tbname from {dbname}.stb1 ) ") tdSql.checkRows(11) tdSql.checkData(0,0,6) tdSql.checkData(10,0,3) - tdSql.query("select unique(t1) from (select _rowts , t1 , tbname from stb1 )") + tdSql.query(f"select unique(t1) from (select _rowts , t1 , tbname from {dbname}.stb1 )") tdSql.checkRows(2) tdSql.checkData(0,0,4) tdSql.checkData(1,0,1) - def check_boundary_values(self): + def check_boundary_values(self, dbname="bound_test"): - tdSql.execute("drop database if exists bound_test") - tdSql.execute("create database if not exists bound_test") - tdSql.execute("use bound_test") + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname}") tdSql.execute( - "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" ) - tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )') tdSql.execute( - f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now()+1s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now()+2s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+2s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.execute( - f"insert into sub1_bound values ( now()+3s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+3s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) tdSql.error( - f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" ) - tdSql.query("select unique(c2) from sub1_bound order by 1 desc") + tdSql.query(f"select unique(c2) from {dbname}.sub1_bound order by 1 desc") tdSql.checkRows(5) tdSql.checkData(0,0,9223372036854775807) diff --git a/tests/system-test/2-query/upper.py b/tests/system-test/2-query/upper.py index bb485161dd12885175c470e8b5542b1ab011f186..f15a6f3ba76d3acb5645f443cf068d4cce7d9755 100644 --- a/tests/system-test/2-query/upper.py +++ b/tests/system-test/2-query/upper.py @@ -95,16 +95,16 @@ class TDTestCase: return sqls - def __test_current(self): + def __test_current(self, dbname="db"): tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: self.__upper_current_check(tb) tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") - def __test_error(self): + def __test_error(self, dbname="db"): tdLog.printNoPrefix("==========err sql condition check , must return error==========") - tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"] for tb in tbname: for errsql in self.__upper_err_check(tb): @@ -112,22 +112,20 @@ class TDTestCase: tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") - def all_test(self): - self.__test_current() - self.__test_error() + def all_test(self, dbname="db"): + self.__test_current(dbname) + self.__test_error(dbname) - - def __create_tb(self): - tdSql.prepare() + def __create_tb(self, dbname="db"): tdLog.printNoPrefix("==========step1:create table") - create_stb_sql = f'''create table stb1( + create_stb_sql = f'''create table {dbname}.stb1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp - ) tags (t1 int) + ) tags (tag1 int) ''' - create_ntb_sql = f'''create table t1( + create_ntb_sql = f'''create table {dbname}.t1( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp @@ -137,83 +135,82 @@ class TDTestCase: tdSql.execute(create_ntb_sql) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') - def __insert_data(self, rows): + def __insert_data(self, rows, dbname="db"): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) for i in range(rows): tdSql.execute( - f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )" + f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" ) tdSql.execute( - f'''insert into ct1 values - ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } ) - ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } ) + f'''insert into {dbname}.ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) ''' ) tdSql.execute( - f'''insert into ct4 values + f'''insert into {dbname}.ct4 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) - ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, - { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000} + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} ) ( { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, - { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000} + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} ) ''' ) tdSql.execute( - f'''insert into ct2 values + f'''insert into {dbname}.ct2 values ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) - ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, - { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000 } + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } ) ( { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, - { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000 } + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } ) ''' ) for i in range(rows): - insert_data = f'''insert into t1 values + insert_data = f'''insert into {dbname}.t1 values ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, - "binary_{i}", "nchar_{i}", { now_time - 1000 * i } ) + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) ''' tdSql.execute(insert_data) tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, - "binary_limit-1", "nchar_limit-1", { now_time - 86400000 } + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } ) ( { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, - "binary_limit-2", "nchar_limit-2", { now_time - 172800000 } + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } ) ''' ) - def run(self): tdSql.prepare() @@ -226,8 +223,7 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) + tdSql.execute("flush database db") tdSql.execute("use db") diff --git a/tests/system-test/2-query/varchar.py b/tests/system-test/2-query/varchar.py index 5cc6c8e39965453c646cb267774564af1a66f42d..17c3ea633357cf16a8b17e52c180192d07e52a87 100644 --- a/tests/system-test/2-query/varchar.py +++ b/tests/system-test/2-query/varchar.py @@ -14,43 +14,44 @@ class TDTestCase: tdSql.init(conn.cursor()) def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + dbname = "db" tdSql.prepare() tdLog.printNoPrefix("==========step1:create table") tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 varchar(16),c9 nchar(32), c10 timestamp) tags (t1 int) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 varchar(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(4): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )') tdLog.printNoPrefix("==========step2:insert data") for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'varchar0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'varchar9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'varchar0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'varchar9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "varchar1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "varchar2", "nchar2", now()+2a ) @@ -70,7 +71,7 @@ class TDTestCase: tdLog.printNoPrefix("==========step3: cast on varchar") - tdSql.query("select c8 from ct1") + tdSql.query(f"select c8 from {dbname}.ct1") for i in range(tdSql.queryRows): tdSql.checkData(i,0, data_ct1_c8[i]) diff --git a/tests/system-test/7-tmq/stbTagFilter-1ctb.py b/tests/system-test/7-tmq/stbTagFilter-1ctb.py index 6a26d2ce1f38774b2d63031c518883641c23f864..6cb152342be5c80b5f755d0b3f2f7e7bf1c7894a 100644 --- a/tests/system-test/7-tmq/stbTagFilter-1ctb.py +++ b/tests/system-test/7-tmq/stbTagFilter-1ctb.py @@ -259,7 +259,6 @@ class TDTestCase: self.tmqCase1() self.tmqCase2() - def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py b/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py index 20e363341f914b66e5ba73f0d5521b393e5743f1..4cb208b616097815ce8dfb099854c5c936fcf08c 100644 --- a/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py +++ b/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py @@ -99,8 +99,8 @@ class TDTestCase: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - if not ((totalConsumeRows >= expectrowcnt * 3/4) and (totalConsumeRows < expectrowcnt)): - tdLog.exit("tmq consume rows error with snapshot = 0!") + # if not ((totalConsumeRows >= expectrowcnt * 3/4) and (totalConsumeRows < expectrowcnt)): + # tdLog.exit("tmq consume rows error with snapshot = 0!") tdLog.info("wait subscriptions exit ....") tmqCom.waitSubscriptionExit(tdSql, topicFromDb) @@ -192,8 +192,8 @@ class TDTestCase: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - if not ((totalConsumeRows >= expectrowcnt / 2 * (1 + 3/4)) and (totalConsumeRows < expectrowcnt)): - tdLog.exit("tmq consume rows error with snapshot = 0!") + # if not ((totalConsumeRows >= expectrowcnt / 2 * (1 + 3/4)) and (totalConsumeRows < expectrowcnt)): + # tdLog.exit("tmq consume rows error with snapshot = 0!") tdLog.info("wait subscriptions exit ....") tmqCom.waitSubscriptionExit(tdSql, topicFromDb) diff --git a/tests/system-test/7-tmq/tmqDropStbCtb.py b/tests/system-test/7-tmq/tmqDropStbCtb.py index 992a128ac077a35708a1ef123ba61bf3352feb78..704811d083c47db53592cce8db85c71733a29057 100644 --- a/tests/system-test/7-tmq/tmqDropStbCtb.py +++ b/tests/system-test/7-tmq/tmqDropStbCtb.py @@ -155,8 +155,9 @@ class TDTestCase: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)): - tdLog.exit("tmq consume rows error with snapshot = 0!") + if self.snapshot == 0: + if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)): + tdLog.exit("tmq consume rows error with snapshot = 0!") tdLog.info("wait subscriptions exit ....") tmqCom.waitSubscriptionExit(tdSql, topicFromDb) @@ -246,8 +247,9 @@ class TDTestCase: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)): - tdLog.exit("tmq consume rows error with snapshot = 0!") + if self.snapshot == 0: + if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)): + tdLog.exit("tmq consume rows error with snapshot = 0!") tdLog.info("wait subscriptions exit ....") tmqCom.waitSubscriptionExit(tdSql, topicFromDb) diff --git a/tests/system-test/7-tmq/tmq_taosx.py b/tests/system-test/7-tmq/tmq_taosx.py index cd13535684501d98673923254c7fe83adc432851..07602ec29f69f9fbd0dab90935e0922996c80f80 100644 --- a/tests/system-test/7-tmq/tmq_taosx.py +++ b/tests/system-test/7-tmq/tmq_taosx.py @@ -20,15 +20,9 @@ class TDTestCase: tdSql.init(conn.cursor()) #tdSql.init(conn.cursor(), logSql) # output sql.txt file - def checkFileContent(self): - buildPath = tdCom.getBuildPath() - cfgPath = tdCom.getClientCfgPath() - cmdStr = '%s/build/bin/tmq_taosx_ci -c %s'%(buildPath, cfgPath) - tdLog.info(cmdStr) - os.system(cmdStr) - - srcFile = '%s/../log/tmq_taosx_tmp.source'%(cfgPath) - dstFile = '%s/../log/tmq_taosx_tmp.result'%(cfgPath) + def checkJson(self, cfgPath, name): + srcFile = '%s/../log/%s.source'%(cfgPath, name) + dstFile = '%s/../log/%s.result'%(cfgPath, name) tdLog.info("compare file: %s, %s"%(srcFile, dstFile)) consumeFile = open(srcFile, mode='r') @@ -43,7 +37,31 @@ class TDTestCase: tdLog.exit("compare error: %s != %s"%src, dst) else: break + return + def checkDropData(self): + tdSql.execute('use db_taosx') + tdSql.query("show tables") + tdSql.checkRows(2) + tdSql.query("select * from jt order by i") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 11) + tdSql.checkData(0, 2, '{"k1":1,"k2":"hello"}') + tdSql.checkData(1, 2, None) + + tdSql.execute('use abc1') + tdSql.query("show tables") + tdSql.checkRows(2) + tdSql.query("select * from jt order by i") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 11) + tdSql.checkData(0, 2, '{"k1":1,"k2":"hello"}') + tdSql.checkData(1, 2, None) + return + + def checkData(self): tdSql.execute('use db_taosx') tdSql.query("select * from ct3 order by c1 desc") tdSql.checkRows(2) @@ -52,6 +70,48 @@ class TDTestCase: tdSql.checkData(1, 1, 23) tdSql.checkData(1, 4, None) + tdSql.query("select * from st1 order by ts") + tdSql.checkRows(8) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 3) + tdSql.checkData(4, 1, 4) + tdSql.checkData(6, 1, 23) + + tdSql.checkData(0, 2, 2) + tdSql.checkData(1, 2, 4) + tdSql.checkData(4, 2, 3) + tdSql.checkData(6, 2, 32) + + tdSql.checkData(0, 3, 'a') + tdSql.checkData(1, 3, 'b') + tdSql.checkData(4, 3, 'hwj') + tdSql.checkData(6, 3, 's21ds') + + tdSql.checkData(0, 4, None) + tdSql.checkData(1, 4, None) + tdSql.checkData(5, 4, 940) + tdSql.checkData(6, 4, None) + + tdSql.checkData(0, 5, 1000) + tdSql.checkData(1, 5, 2000) + tdSql.checkData(4, 5, 1000) + tdSql.checkData(6, 5, 5000) + + tdSql.checkData(0, 6, 'ttt') + tdSql.checkData(1, 6, None) + tdSql.checkData(4, 6, 'ttt') + tdSql.checkData(6, 6, None) + + tdSql.checkData(0, 7, True) + tdSql.checkData(1, 7, None) + tdSql.checkData(4, 7, True) + tdSql.checkData(6, 7, None) + + tdSql.checkData(0, 8, None) + tdSql.checkData(1, 8, None) + tdSql.checkData(4, 8, None) + tdSql.checkData(6, 8, None) + tdSql.query("select * from ct1") tdSql.checkRows(4) @@ -74,12 +134,82 @@ class TDTestCase: tdSql.checkData(0, 2, None) tdSql.checkData(1, 1, 1) tdSql.checkData(1, 2, '{"k1":1,"k2":"hello"}') + return + + def checkWal1Vgroup(self): + buildPath = tdCom.getBuildPath() + cfgPath = tdCom.getClientCfgPath() + cmdStr = '%s/build/bin/tmq_taosx_ci -c %s -sv 1 -dv 1'%(buildPath, cfgPath) + tdLog.info(cmdStr) + os.system(cmdStr) + + self.checkJson(cfgPath, "tmq_taosx_tmp") + self.checkData() + + return + + def checkWalMultiVgroups(self): + buildPath = tdCom.getBuildPath() + cmdStr = '%s/build/bin/tmq_taosx_ci -sv 3 -dv 5'%(buildPath) + tdLog.info(cmdStr) + os.system(cmdStr) + + self.checkData() + + return + + def checkWalMultiVgroupsWithDropTable(self): + buildPath = tdCom.getBuildPath() + cmdStr = '%s/build/bin/tmq_taosx_ci -sv 3 -dv 5 -d'%(buildPath) + tdLog.info(cmdStr) + os.system(cmdStr) + + self.checkDropData() + + return + + def checkSnapshot1Vgroup(self): + buildPath = tdCom.getBuildPath() + cfgPath = tdCom.getClientCfgPath() + cmdStr = '%s/build/bin/tmq_taosx_ci -c %s -sv 1 -dv 1 -s'%(buildPath, cfgPath) + tdLog.info(cmdStr) + os.system(cmdStr) + + self.checkJson(cfgPath, "tmq_taosx_tmp_snapshot") + self.checkData() + + return + + def checkSnapshotMultiVgroups(self): + buildPath = tdCom.getBuildPath() + cmdStr = '%s/build/bin/tmq_taosx_ci -sv 2 -dv 4 -s'%(buildPath) + tdLog.info(cmdStr) + os.system(cmdStr) + + self.checkData() + + return + + def checkSnapshotMultiVgroupsWithDropTable(self): + buildPath = tdCom.getBuildPath() + cmdStr = '%s/build/bin/tmq_taosx_ci -sv 2 -dv 4 -s -d'%(buildPath) + tdLog.info(cmdStr) + os.system(cmdStr) + + self.checkDropData() return def run(self): tdSql.prepare() - self.checkFileContent() + self.checkWal1Vgroup() + self.checkSnapshot1Vgroup() + + self.checkWalMultiVgroups() + self.checkSnapshotMultiVgroups() + + self.checkWalMultiVgroupsWithDropTable() + self.checkSnapshotMultiVgroupsWithDropTable() def stop(self): tdSql.close() diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index bb651fad27a7c4864342e0b40e399ec9baf45365..da164aa9b28ad7fb8eaf5ed2459941bbcebf2f1a 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -110,56 +110,111 @@ python3 ./test.py -f 2-query/histogram.py python3 ./test.py -f 2-query/histogram.py -R python3 ./test.py -f 2-query/hyperloglog.py python3 ./test.py -f 2-query/hyperloglog.py -R +python3 ./test.py -f 2-query/interp.py +python3 ./test.py -f 2-query/interp.py -R python3 ./test.py -f 2-query/irate.py -# python3 ./test.py -f 2-query/irate.py -R +python3 ./test.py -f 2-query/irate.py -R python3 ./test.py -f 2-query/join.py python3 ./test.py -f 2-query/join.py -R - -python3 ./test.py -f 2-query/interp.py -python3 ./test.py -f 2-query/interp.py -R - +python3 ./test.py -f 2-query/last_row.py +python3 ./test.py -f 2-query/last_row.py -R +python3 ./test.py -f 2-query/last.py +python3 ./test.py -f 2-query/last.py -R +python3 ./test.py -f 2-query/leastsquares.py +python3 ./test.py -f 2-query/leastsquares.py -R +python3 ./test.py -f 2-query/length.py +python3 ./test.py -f 2-query/length.py -R +python3 ./test.py -f 2-query/log.py +# python3 ./test.py -f 2-query/log.py -R +python3 ./test.py -f 2-query/lower.py +python3 ./test.py -f 2-query/lower.py -R +python3 ./test.py -f 2-query/ltrim.py +python3 ./test.py -f 2-query/ltrim.py -R +python3 ./test.py -f 2-query/mavg.py +python3 ./test.py -f 2-query/mavg.py -R +python3 ./test.py -f 2-query/max_partition.py +python3 ./test.py -f 2-query/max_partition.py -R +python3 ./test.py -f 2-query/max.py +python3 ./test.py -f 2-query/max.py -R +python3 ./test.py -f 2-query/min.py +python3 ./test.py -f 2-query/min.py -R +python3 ./test.py -f 2-query/Now.py +python3 ./test.py -f 2-query/Now.py -R +python3 ./test.py -f 2-query/percentile.py +python3 ./test.py -f 2-query/percentile.py -R +python3 ./test.py -f 2-query/pow.py +python3 ./test.py -f 2-query/pow.py -R +python3 ./test.py -f 2-query/query_cols_tags_and_or.py +python3 ./test.py -f 2-query/query_cols_tags_and_or.py -R +python3 ./test.py -f 2-query/round.py +python3 ./test.py -f 2-query/round.py -R +python3 ./test.py -f 2-query/rtrim.py +python3 ./test.py -f 2-query/rtrim.py -R +python3 ./test.py -f 2-query/sample.py +python3 ./test.py -f 2-query/sample.py -R +python3 ./test.py -f 2-query/sin.py +python3 ./test.py -f 2-query/sin.py -R +python3 ./test.py -f 2-query/smaTest.py +python3 ./test.py -f 2-query/smaTest.py -R +python3 ./test.py -f 2-query/sml.py +python3 ./test.py -f 2-query/sml.py -R +python3 ./test.py -f 2-query/spread.py +python3 ./test.py -f 2-query/spread.py -R +python3 ./test.py -f 2-query/sqrt.py +python3 ./test.py -f 2-query/sqrt.py -R +python3 ./test.py -f 2-query/statecount.py +python3 ./test.py -f 2-query/statecount.py -R +python3 ./test.py -f 2-query/stateduration.py +python3 ./test.py -f 2-query/stateduration.py -R +python3 ./test.py -f 2-query/substr.py +python3 ./test.py -f 2-query/substr.py -R +python3 ./test.py -f 2-query/sum.py +python3 ./test.py -f 2-query/sum.py -R +python3 ./test.py -f 2-query/tail.py +python3 ./test.py -f 2-query/tail.py -R +python3 ./test.py -f 2-query/tan.py +# python3 ./test.py -f 2-query/tan.py -R +python3 ./test.py -f 2-query/Timediff.py +python3 ./test.py -f 2-query/Timediff.py -R +python3 ./test.py -f 2-query/timetruncate.py +# python3 ./test.py -f 2-query/timetruncate.py -R +python3 ./test.py -f 2-query/timezone.py +python3 ./test.py -f 2-query/timezone.py -R +python3 ./test.py -f 2-query/To_iso8601.py +python3 ./test.py -f 2-query/To_iso8601.py -R +python3 ./test.py -f 2-query/To_unixtimestamp.py +python3 ./test.py -f 2-query/To_unixtimestamp.py -R +python3 ./test.py -f 2-query/Today.py +# python3 ./test.py -f 2-query/Today.py -R +python3 ./test.py -f 2-query/top.py +python3 ./test.py -f 2-query/top.py -R +python3 ./test.py -f 2-query/tsbsQuery.py +python3 ./test.py -f 2-query/tsbsQuery.py -R +python3 ./test.py -f 2-query/ttl_comment.py +python3 ./test.py -f 2-query/ttl_comment.py -R +python3 ./test.py -f 2-query/twa.py +python3 ./test.py -f 2-query/twa.py -R +python3 ./test.py -f 2-query/union.py +python3 ./test.py -f 2-query/union.py -R +python3 ./test.py -f 2-query/unique.py +python3 ./test.py -f 2-query/unique.py -R +python3 ./test.py -f 2-query/upper.py +python3 ./test.py -f 2-query/upper.py -R +python3 ./test.py -f 2-query/varchar.py +python3 ./test.py -f 2-query/varchar.py -R python3 ./test.py -f 1-insert/update_data.py python3 ./test.py -f 1-insert/delete_data.py -python3 ./test.py -f 2-query/varchar.py -python3 ./test.py -f 2-query/ltrim.py -python3 ./test.py -f 2-query/rtrim.py -python3 ./test.py -f 2-query/length.py -python3 ./test.py -f 2-query/upper.py -python3 ./test.py -f 2-query/lower.py python3 ./test.py -f 2-query/join2.py -python3 ./test.py -f 2-query/substr.py -python3 ./test.py -f 2-query/union.py python3 ./test.py -f 2-query/union1.py python3 ./test.py -f 2-query/concat2.py -python3 ./test.py -f 2-query/spread.py -python3 ./test.py -f 2-query/leastsquares.py -python3 ./test.py -f 2-query/timezone.py -python3 ./test.py -f 2-query/Now.py -python3 ./test.py -f 2-query/Today.py -python3 ./test.py -f 2-query/max.py -python3 ./test.py -f 2-query/min.py -python3 ./test.py -f 2-query/last.py -python3 ./test.py -f 2-query/To_iso8601.py -python3 ./test.py -f 2-query/To_unixtimestamp.py -python3 ./test.py -f 2-query/timetruncate.py -python3 ./test.py -f 2-query/Timediff.py python3 ./test.py -f 2-query/json_tag.py -python3 ./test.py -f 2-query/top.py -python3 ./test.py -f 2-query/percentile.py -python3 ./test.py -f 2-query/round.py -python3 ./test.py -f 2-query/log.py -python3 ./test.py -f 2-query/pow.py -python3 ./test.py -f 2-query/sqrt.py -python3 ./test.py -f 2-query/sin.py -python3 ./test.py -f 2-query/tan.py -python3 ./test.py -f 2-query/query_cols_tags_and_or.py # python3 ./test.py -f 2-query/nestedQuery.py # TD-15983 subquery output duplicate name column. # Please Xiangyang Guo modify the following script @@ -167,19 +222,8 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py python3 ./test.py -f 2-query/elapsed.py python3 ./test.py -f 2-query/csum.py -#python3 ./test.py -f 2-query/mavg.py -python3 ./test.py -f 2-query/sample.py python3 ./test.py -f 2-query/function_diff.py -python3 ./test.py -f 2-query/unique.py -python3 ./test.py -f 2-query/stateduration.py -python3 ./test.py -f 2-query/statecount.py -python3 ./test.py -f 2-query/tail.py -python3 ./test.py -f 2-query/ttl_comment.py -python3 ./test.py -f 2-query/twa.py python3 ./test.py -f 2-query/queryQnode.py -python3 ./test.py -f 2-query/max_partition.py -python3 ./test.py -f 2-query/last_row.py -python3 ./test.py -f 2-query/tsbsQuery.py python3 ./test.py -f 6-cluster/5dnode1mnode.py python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3 diff --git a/tests/system-test/simpletest.bat b/tests/system-test/simpletest.bat index 656828aa1ea4b1b2681dab2edac2c94178b5176a..cc4ae1795534e943ed603e2db5202ce272f54ab1 100644 --- a/tests/system-test/simpletest.bat +++ b/tests/system-test/simpletest.bat @@ -4,9 +4,9 @@ python3 .\test.py -f 0-others\taosShellError.py python3 .\test.py -f 0-others\taosShellNetChk.py python3 .\test.py -f 0-others\telemetry.py python3 .\test.py -f 0-others\taosdMonitor.py -python3 .\test.py -f 0-others\udfTest.py -python3 .\test.py -f 0-others\udf_create.py -python3 .\test.py -f 0-others\udf_restart_taosd.py +@REM python3 .\test.py -f 0-others\udfTest.py +@REM python3 .\test.py -f 0-others\udf_create.py +@REM python3 .\test.py -f 0-others\udf_restart_taosd.py @REM python3 .\test.py -f 0-others\cachelast.py @REM python3 .\test.py -f 0-others\user_control.py diff --git a/tests/system-test/test.py b/tests/system-test/test.py index 5dc6139410bf7d53da8e8fd36e047bb81a7e5eae..2f482e42773c3db665ad6276589b8d7b39cf9ce1 100644 --- a/tests/system-test/test.py +++ b/tests/system-test/test.py @@ -194,7 +194,7 @@ if __name__ == "__main__": processID = subprocess.check_output(psCmd, shell=True) for port in range(6030, 6041): - usePortPID = "lsof -i tcp:%d | grep LISTEn | awk '{print $2}'" % port + usePortPID = "lsof -i tcp:%d | grep LISTEN | awk '{print $2}'" % port processID = subprocess.check_output(usePortPID, shell=True) if processID: @@ -206,11 +206,13 @@ if __name__ == "__main__": time.sleep(2) if restful: - toBeKilled = "taosadapter" + toBeKilled = "taosadapt" - killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled + # killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled + killCmd = f"pkill {toBeKilled}" psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + # psCmd = f"pgrep {toBeKilled}" processID = subprocess.check_output(psCmd, shell=True) while(processID): @@ -218,15 +220,15 @@ if __name__ == "__main__": time.sleep(1) processID = subprocess.check_output(psCmd, shell=True) - for port in range(6030, 6041): - usePortPID = "lsof -i tcp:%d | grep LISTEn | awk '{print $2}'" % port - processID = subprocess.check_output(usePortPID, shell=True) + port = 6041 + usePortPID = f"lsof -i tcp:{port} | grep LISTEN | awk '{{print $2}}'" + processID = subprocess.check_output(usePortPID, shell=True) - if processID: - killCmd = "kill -TERM %s" % processID - os.system(killCmd) - fuserCmd = "fuser -k -n tcp %d" % port - os.system(fuserCmd) + if processID: + killCmd = f"kill -TERM {processID}" + os.system(killCmd) + fuserCmd = f"fuser -k -n tcp {port}" + os.system(fuserCmd) tdLog.info('stop taosadapter') diff --git a/tests/test/c/sml_test.c b/tests/test/c/sml_test.c index 50249a5c5621aad4821fd7866950021f240c1c8a..18181d2073e154c7f3fe183d164e1f858fddcb4f 100644 --- a/tests/test/c/sml_test.c +++ b/tests/test/c/sml_test.c @@ -1089,7 +1089,7 @@ int sml_add_tag_col_Test() { if (code) return code; const char *sql1[] = { - "macylr,id=macylr_17875_1804,t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\",t11=127i8,t10=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64,c11=L\"ncharColValue\",c10=f 1626006833639000000" + "macylr,id=macylr_17875_1804,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\",t11=127i8,t10=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c8=L\"ncharColValue\",c9=7u64,c11=L\"ncharColValue\",c10=f 1626006833639000000" }; pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_LINE_PROTOCOL, 0); diff --git a/tests/test/c/tmqSim.c b/tests/test/c/tmqSim.c index d39ade7e91495d2b3ff1924efdb78103d7b423cc..71b31ba1071c977d9fd3d2ceb046bdff02ca53df 100644 --- a/tests/test/c/tmqSim.c +++ b/tests/test/c/tmqSim.c @@ -492,7 +492,6 @@ static char* shellFormatTimestamp(char* buf, int64_t val, int32_t precision) { static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* field, int32_t length, int32_t precision) { if (val == NULL) { - taosFprintfFile(pFile, "%s", TSDB_DATA_NULL_STR); return; } @@ -540,13 +539,34 @@ static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* f case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_JSON: - memcpy(buf, val, length); - buf[length] = 0; - taosFprintfFile(pFile, "\'%s\'", buf); + { + char quotationStr[2]; + int32_t bufIndex = 0; + quotationStr[0] = 0; + quotationStr[1] = 0; + for (int32_t i = 0; i < length; i++) { + buf[bufIndex] = val[i]; + bufIndex++; + if (val[i] == '\"') { + buf[bufIndex] = val[i]; + bufIndex++; + quotationStr[0] = '\"'; + } + if (val[i] == ',') { + quotationStr[0] = '\"'; + } + } + buf[bufIndex] = 0; + if (length == 0) { + quotationStr[0] = '\"'; + } + + taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); + } break; case TSDB_DATA_TYPE_TIMESTAMP: shellFormatTimestamp(buf, *(int64_t*)val, precision); - taosFprintfFile(pFile, "'%s'", buf); + taosFprintfFile(pFile, "%s", buf); break; default: break; diff --git a/tests/test/c/tmq_taosx_ci.c b/tests/test/c/tmq_taosx_ci.c index ece7ad4819f2947cb0a474491255dd296136581b..2afa05b0120b91ba5b40e151e10fc28b204bc1c0 100644 --- a/tests/test/c/tmq_taosx_ci.c +++ b/tests/test/c/tmq_taosx_ci.c @@ -22,8 +22,16 @@ #include "types.h" static int running = 1; -TdFilePtr g_fp = NULL; -char dir[64]={0}; +TdFilePtr g_fp = NULL; +typedef struct{ + bool snapShot; + bool dropTable; + int srcVgroups; + int dstVgroups; + char dir[64]; +}Config; + +Config g_conf = {0}; static TAOS* use_db(){ TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); @@ -41,7 +49,6 @@ static TAOS* use_db(){ } static void msg_process(TAOS_RES* msg) { - /*memset(buf, 0, 1024);*/ printf("-----------topic-------------: %s\n", tmq_get_topic_name(msg)); printf("db: %s\n", tmq_get_db_name(msg)); printf("vg: %d\n", tmq_get_vgroup_id(msg)); @@ -51,8 +58,11 @@ static void msg_process(TAOS_RES* msg) { if (result) { printf("meta result: %s\n", result); } - taosFprintfFile(g_fp, result); - taosFprintfFile(g_fp, "\n"); + if(g_fp){ + taosFprintfFile(g_fp, result); + taosFprintfFile(g_fp, "\n"); + } + tmq_free_json_meta(result); } @@ -61,22 +71,10 @@ static void msg_process(TAOS_RES* msg) { int32_t ret = tmq_write_raw(pConn, raw); printf("write raw data: %s\n", tmq_err2str(ret)); -// else{ -// while(1){ -// int numOfRows = 0; -// void *pData = NULL; -// taos_fetch_raw_block(msg, &numOfRows, &pData); -// if(numOfRows == 0) break; -// printf("write data: tbname:%s, numOfRows:%d\n", tmq_get_table_name(msg), numOfRows); -// int ret = taos_write_raw_block(pConn, numOfRows, pData, tmq_get_table_name(msg)); -// printf("write raw data: %s\n", tmq_err2str(ret)); -// } -// } - taos_close(pConn); } -int32_t init_env() { +int32_t init_env(Config *conf) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); if (pConn == NULL) { return -1; @@ -89,13 +87,22 @@ int32_t init_env() { } taos_free_result(pRes); - pRes = taos_query(pConn, "create database if not exists db_taosx vgroups 1"); + char sql[128] = {0}; + snprintf(sql, 128, "create database if not exists db_taosx vgroups %d", conf->dstVgroups); + pRes = taos_query(pConn, sql); if (taos_errno(pRes) != 0) { printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes)); return -1; } taos_free_result(pRes); + pRes = taos_query(pConn, "drop topic if exists topic_db"); + if (taos_errno(pRes) != 0) { + printf("error in drop topic, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + pRes = taos_query(pConn, "drop database if exists abc1"); if (taos_errno(pRes) != 0) { printf("error in drop db, reason:%s\n", taos_errstr(pRes)); @@ -103,7 +110,8 @@ int32_t init_env() { } taos_free_result(pRes); - pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1"); + snprintf(sql, 128, "create database if not exists abc1 vgroups %d", conf->srcVgroups); + pRes = taos_query(pConn, sql); if (taos_errno(pRes) != 0) { printf("error in create db, reason:%s\n", taos_errstr(pRes)); return -1; @@ -133,7 +141,7 @@ int32_t init_env() { } taos_free_result(pRes); - pRes = taos_query(pConn, "insert into ct0 values(1626006833600, 1, 2, 'a')"); + pRes = taos_query(pConn, "insert into ct0 values(1626006833400, 1, 2, 'a')"); if (taos_errno(pRes) != 0) { printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes)); return -1; @@ -168,7 +176,7 @@ int32_t init_env() { } taos_free_result(pRes); - pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833602, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')"); + pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833603, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')"); if (taos_errno(pRes) != 0) { printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes)); return -1; @@ -224,6 +232,22 @@ int32_t init_env() { } taos_free_result(pRes); + if(conf->dropTable){ + pRes = taos_query(pConn, "drop table ct3 ct1"); + if (taos_errno(pRes) != 0) { + printf("failed to drop child table ct3, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "drop table st1"); + if (taos_errno(pRes) != 0) { + printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + } + pRes = taos_query(pConn, "create table if not exists n1(ts timestamp, c1 int, c2 nchar(4))"); if (taos_errno(pRes) != 0) { printf("failed to create normal table n1, reason:%s\n", taos_errstr(pRes)); @@ -273,6 +297,15 @@ int32_t init_env() { } taos_free_result(pRes); + if(conf->dropTable){ + pRes = taos_query(pConn, "drop table n1"); + if (taos_errno(pRes) != 0) { + printf("failed to drop normal table n1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + } + pRes = taos_query(pConn, "create table jt(ts timestamp, i int) tags(t json)"); if (taos_errno(pRes) != 0) { printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes)); @@ -308,6 +341,23 @@ int32_t init_env() { } taos_free_result(pRes); + if(conf->dropTable){ + pRes = taos_query(pConn, + "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 " + "nchar(8), t4 bool)"); + if (taos_errno(pRes) != 0) { + printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "drop table st1"); + if (taos_errno(pRes) != 0) { + printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + } taos_close(pConn); return 0; } @@ -327,9 +377,9 @@ int32_t create_topic() { } taos_free_result(pRes); - pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1"); + pRes = taos_query(pConn, "create topic topic_db with meta as database abc1"); if (taos_errno(pRes) != 0) { - printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes)); + printf("failed to create topic topic_db, reason:%s\n", taos_errstr(pRes)); return -1; } taos_free_result(pRes); @@ -342,18 +392,7 @@ void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { printf("commit %d tmq %p param %p\n", code, tmq, param); } -tmq_t* build_consumer() { -#if 0 - TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); - assert(pConn != NULL); - - TAOS_RES* pRes = taos_query(pConn, "use abc1"); - if (taos_errno(pRes) != 0) { - printf("error in use db, reason:%s\n", taos_errstr(pRes)); - } - taos_free_result(pRes); -#endif - +tmq_t* build_consumer(Config *config) { tmq_conf_t* conf = tmq_conf_new(); tmq_conf_set(conf, "group.id", "tg2"); tmq_conf_set(conf, "client.id", "my app 1"); @@ -363,7 +402,9 @@ tmq_t* build_consumer() { tmq_conf_set(conf, "enable.auto.commit", "true"); tmq_conf_set(conf, "enable.heartbeat.background", "true"); - /*tmq_conf_set(conf, "experimental.snapshot.enable", "true");*/ + if(config->snapShot){ + tmq_conf_set(conf, "experimental.snapshot.enable", "true"); + } tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); @@ -374,8 +415,7 @@ tmq_t* build_consumer() { tmq_list_t* build_topic_list() { tmq_list_t* topic_list = tmq_list_new(); - tmq_list_append(topic_list, "topic_ctb_column"); - /*tmq_list_append(topic_list, "tmq_test_db_multi_insert_topic");*/ + tmq_list_append(topic_list, "topic_db"); return topic_list; } @@ -393,12 +433,7 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) { if (tmqmessage) { cnt++; msg_process(tmqmessage); - /*if (cnt >= 2) break;*/ - /*printf("get data\n");*/ taos_free_result(tmqmessage); - /*} else {*/ - /*break;*/ - /*tmq_commit_sync(tmq, NULL);*/ }else{ break; } @@ -411,52 +446,18 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) { fprintf(stderr, "%% Consumer closed\n"); } -void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) { - static const int MIN_COMMIT_COUNT = 1; - - int msg_count = 0; - int32_t code; - - if ((code = tmq_subscribe(tmq, topics))) { - fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code)); - return; - } - - tmq_list_t* subList = NULL; - tmq_subscription(tmq, &subList); - char** subTopics = tmq_list_to_c_array(subList); - int32_t sz = tmq_list_get_size(subList); - printf("subscribed topics: "); - for (int32_t i = 0; i < sz; i++) { - printf("%s, ", subTopics[i]); - } - printf("\n"); - tmq_list_destroy(subList); - - while (running) { - TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000); - if (tmqmessage) { - msg_process(tmqmessage); - taos_free_result(tmqmessage); - - /*tmq_commit_sync(tmq, NULL);*/ - /*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/ - } - } - - code = tmq_consumer_close(tmq); - if (code) - fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code)); - else - fprintf(stderr, "%% Consumer closed\n"); -} - -void initLogFile() { +void initLogFile(Config *conf) { char f1[256] = {0}; char f2[256] = {0}; - sprintf(f1, "%s/../log/tmq_taosx_tmp.source", dir); - sprintf(f2, "%s/../log/tmq_taosx_tmp.result", dir); + if(conf->snapShot){ + sprintf(f1, "%s/../log/tmq_taosx_tmp_snapshot.source", conf->dir); + sprintf(f2, "%s/../log/tmq_taosx_tmp_snapshot.result", conf->dir); + }else{ + sprintf(f1, "%s/../log/tmq_taosx_tmp.source", conf->dir); + sprintf(f2, "%s/../log/tmq_taosx_tmp.result", conf->dir); + } + TdFilePtr pFile = taosOpenFile(f1, TD_FILE_TEXT | TD_FILE_TRUNC | TD_FILE_STREAM); if (NULL == pFile) { fprintf(stderr, "Failed to open %s for save result\n", f1); @@ -469,52 +470,82 @@ void initLogFile() { fprintf(stderr, "Failed to open %s for save result\n", f2); exit(-1); } - char *result[] = { - "{\"type\":\"create\",\"tableName\":\"st1\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":6},{\"name\":\"c3\",\"type\":8,\"length\":16}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\",\"type\":1}]}", - "{\"type\":\"create\",\"tableName\":\"ct0\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":1000},{\"name\":\"t3\",\"type\":10,\"value\":\"\\\"ttt\\\"\"},{\"name\":\"t4\",\"type\":1,\"value\":1}]}", - "{\"type\":\"create\",\"tableName\":\"ct1\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":2000}]}", - "{\"type\":\"create\",\"tableName\":\"ct2\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[]}", - "{\"type\":\"create\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":3000}]}", - "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}", - "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":7,\"colName\":\"c3\",\"colType\":8,\"colLength\":64}", - "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":1,\"colName\":\"t2\",\"colType\":8,\"colLength\":64}", - "{\"type\":\"alter\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"alterType\":4,\"colName\":\"t1\",\"colValue\":\"5000\",\"colValueNull\":false}", - "{\"type\":\"create\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":10,\"length\":4}],\"tags\":[]}", - "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":5,\"colName\":\"c3\",\"colType\":5}", - "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":7,\"colName\":\"c2\",\"colType\":10,\"colLength\":8}", - "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":10,\"colName\":\"c3\",\"colNewName\":\"cc3\"}", - "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":9}", - "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":6,\"colName\":\"c1\"}", - "{\"type\":\"create\",\"tableName\":\"jt\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"i\",\"type\":4}],\"tags\":[{\"name\":\"t\",\"type\":15}]}", - "{\"type\":\"create\",\"tableName\":\"jt1\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[{\"name\":\"t\",\"type\":15,\"value\":\"{\\\"k1\\\":1,\\\"k2\\\":\\\"hello\\\"}\"}]}", - "{\"type\":\"create\",\"tableName\":\"jt2\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[]}" - }; - - for(int i = 0; i < sizeof(result)/sizeof(result[0]); i++){ - taosFprintfFile(pFile2, result[i]); - taosFprintfFile(pFile2, "\n"); + + if(conf->snapShot){ + char *result[] = { + "{\"type\":\"create\",\"tableName\":\"st1\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":6},{\"name\":\"c3\",\"type\":8,\"length\":64},{\"name\":\"c4\",\"type\":5}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\",\"type\":1},{\"name\":\"t2\",\"type\":8,\"length\":64}]}", + "{\"type\":\"create\",\"tableName\":\"ct0\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":1000},{\"name\":\"t3\",\"type\":10,\"value\":\"\\\"ttt\\\"\"},{\"name\":\"t4\",\"type\":1,\"value\":1}]}", + "{\"type\":\"create\",\"tableName\":\"ct1\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":2000}]}", + "{\"type\":\"create\",\"tableName\":\"ct2\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[]}", + "{\"type\":\"create\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":5000}]}", + "{\"type\":\"create\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c2\",\"type\":10,\"length\":8},{\"name\":\"cc3\",\"type\":5}],\"tags\":[]}", + "{\"type\":\"create\",\"tableName\":\"jt\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"i\",\"type\":4}],\"tags\":[{\"name\":\"t\",\"type\":15}]}", + "{\"type\":\"create\",\"tableName\":\"jt1\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[{\"name\":\"t\",\"type\":15,\"value\":\"{\\\"k1\\\":1,\\\"k2\\\":\\\"hello\\\"}\"}]}", + "{\"type\":\"create\",\"tableName\":\"jt2\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[]}", + }; + + for(int i = 0; i < sizeof(result)/sizeof(result[0]); i++){ + taosFprintfFile(pFile2, result[i]); + taosFprintfFile(pFile2, "\n"); + } + }else{ + char *result[] = { + "{\"type\":\"create\",\"tableName\":\"st1\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":6},{\"name\":\"c3\",\"type\":8,\"length\":16}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\",\"type\":1}]}", + "{\"type\":\"create\",\"tableName\":\"ct0\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":1000},{\"name\":\"t3\",\"type\":10,\"value\":\"\\\"ttt\\\"\"},{\"name\":\"t4\",\"type\":1,\"value\":1}]}", + "{\"type\":\"create\",\"tableName\":\"ct1\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":2000}]}", + "{\"type\":\"create\",\"tableName\":\"ct2\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[]}", + "{\"type\":\"create\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":3000}]}", + "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}", + "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":7,\"colName\":\"c3\",\"colType\":8,\"colLength\":64}", + "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":1,\"colName\":\"t2\",\"colType\":8,\"colLength\":64}", + "{\"type\":\"alter\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"alterType\":4,\"colName\":\"t1\",\"colValue\":\"5000\",\"colValueNull\":false}", + "{\"type\":\"create\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":10,\"length\":4}],\"tags\":[]}", + "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":5,\"colName\":\"c3\",\"colType\":5}", + "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":7,\"colName\":\"c2\",\"colType\":10,\"colLength\":8}", + "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":10,\"colName\":\"c3\",\"colNewName\":\"cc3\"}", + "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":9}", + "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":6,\"colName\":\"c1\"}", + "{\"type\":\"create\",\"tableName\":\"jt\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"i\",\"type\":4}],\"tags\":[{\"name\":\"t\",\"type\":15}]}", + "{\"type\":\"create\",\"tableName\":\"jt1\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[{\"name\":\"t\",\"type\":15,\"value\":\"{\\\"k1\\\":1,\\\"k2\\\":\\\"hello\\\"}\"}]}", + "{\"type\":\"create\",\"tableName\":\"jt2\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[]}" + }; + + for(int i = 0; i < sizeof(result)/sizeof(result[0]); i++){ + taosFprintfFile(pFile2, result[i]); + taosFprintfFile(pFile2, "\n"); + } } + taosCloseFile(&pFile2); } int main(int argc, char* argv[]) { - if(argc == 3 && strcmp(argv[1], "-c") == 0) { - strcpy(dir, argv[2]); - }else{ - strcpy(dir, "../../../sim/psim/cfg"); + for (int32_t i = 1; i < argc; i++) { + if(strcmp(argv[i], "-c") == 0){ + strcpy(g_conf.dir, argv[++i]); + }else if(strcmp(argv[i], "-s") == 0){ + g_conf.snapShot = true; + }else if(strcmp(argv[i], "-d") == 0){ + g_conf.dropTable = true; + }else if(strcmp(argv[i], "-sv") == 0){ + g_conf.srcVgroups = atol(argv[++i]); + }else if(strcmp(argv[i], "-dv") == 0){ + g_conf.dstVgroups = atol(argv[++i]); + } } printf("env init\n"); - initLogFile(); + if(strlen(g_conf.dir) != 0){ + initLogFile(&g_conf); + } - if (init_env() < 0) { + if (init_env(&g_conf) < 0) { return -1; } create_topic(); - tmq_t* tmq = build_consumer(); + tmq_t* tmq = build_consumer(&g_conf); tmq_list_t* topic_list = build_topic_list(); basic_consume_loop(tmq, topic_list); - /*sync_consume_loop(tmq, topic_list);*/ taosCloseFile(&g_fp); } diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 5751c347e3453bdc39bfb2a7754be545047ddabe..03097e31b9d65c48cdc543524d12dc0a80e2c6b3 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -82,6 +82,7 @@ ELSE () COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter ) EXECUTE_PROCESS( + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter COMMAND git rev-parse --short HEAD RESULT_VARIABLE commit_sha1 OUTPUT_VARIABLE taosadapter_commit_sha1 @@ -99,26 +100,36 @@ ELSE () MESSAGE("CURRENT SOURCE DIR ${CMAKE_CURRENT_SOURCE_DIR}") IF (TD_LINUX) include(ExternalProject) + set(_upx_prefix "$ENV{HOME}/.taos/externals/upx") + ExternalProject_Add(upx + PREFIX "${_upx_prefix}" + URL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + ) + ExternalProject_Add(taosadapter PREFIX "taosadapter" SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter BUILD_ALWAYS off - DEPENDS taos + DEPENDS taos upx BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config" PATCH_COMMAND COMMAND git clean -f -d BUILD_COMMAND - COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" - COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" + COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" + COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" INSTALL_COMMAND - COMMAND wget -nc https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz -O $ENV{HOME}/upx.tar.xz && tar -xvJf $ENV{HOME}/upx.tar.xz -C $ENV{HOME}/ --strip-components 1 > /dev/null && $ENV{HOME}/upx taosadapter || : + COMMAND ${_upx_prefix}/src/upx/upx taosadapter COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin ) + unset(_upx_prefix) ELSEIF (TD_DARWIN) include(ExternalProject) ExternalProject_Add(taosadapter @@ -131,8 +142,8 @@ ELSE () PATCH_COMMAND COMMAND git clean -f -d BUILD_COMMAND - COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" - COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" + COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" + COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" INSTALL_COMMAND COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ @@ -140,7 +151,42 @@ ELSE () COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin ) +# unset(_upx_prefix) + ELSEIF (TD_WINDOWS) + include(ExternalProject) + set(_upx_prefix "${CMAKE_BINARY_DIR}/.taos/externals/upx") + ExternalProject_Add(upx + PREFIX "${_upx_prefix}" + URL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-win32.zip + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + ) + + ExternalProject_Add(taosadapter + PREFIX "taosadapter" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter + BUILD_ALWAYS off + DEPENDS taos + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config" + PATCH_COMMAND + COMMAND git clean -f -d + BUILD_COMMAND + COMMAND set CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client + COMMAND set CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib + COMMAND go build -a -o taosadapter.exe -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" + COMMAND go build -a -o taosadapter-debug.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}" + INSTALL_COMMAND + COMMAND ${_upx_prefix}/src/upx/upx taosadapter.exe + COMMAND cmake -E copy taosadapter.exe ${CMAKE_BINARY_DIR}/build/bin + COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E copy taosadapter-debug.exe ${CMAKE_BINARY_DIR}/build/bin + ) + unset(_upx_prefix) ELSE () - MESSAGE("${Yellow} Windows system still use original embedded httpd ${ColourReset}") + MESSAGE("${Yellow} taosAdapter Not supported yet ${ColourReset}") ENDIF () ENDIF () diff --git a/tools/shell/inc/shellInt.h b/tools/shell/inc/shellInt.h index 358377f8049f4b8aaf5c1499a21e1e566dac5490..15f6f6dc6a362c8c94994727fe19fa090ca94c57 100644 --- a/tools/shell/inc/shellInt.h +++ b/tools/shell/inc/shellInt.h @@ -95,6 +95,7 @@ typedef struct { TAOS* conn; TdThread pid; tsem_t cancelSem; + bool exit; #ifdef WEBSOCKET WS_TAOS* ws_conn; bool stop_query; @@ -112,7 +113,7 @@ int32_t shellExecute(); int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision); void shellPrintHeader(TAOS_FIELD *fields, int32_t *width, int32_t num_fields); void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t length, int32_t precision); -void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision); +void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision); // shellUtil.c int32_t shellCheckIntSize(); void shellPrintVersion(); diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 724ac8fbfdd64bd732918c8915c1f7342c615b02..e54b98a0a640b024c6ef301de7d6e3e4aa190bf3 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -266,7 +266,6 @@ char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision) { void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision) { if (val == NULL) { - taosFprintfFile(pFile, "%s", TSDB_DATA_NULL_STR); return; } @@ -314,13 +313,34 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_JSON: - memcpy(buf, val, length); - buf[length] = 0; - taosFprintfFile(pFile, "\'%s\'", buf); + { + char quotationStr[2]; + int32_t bufIndex = 0; + quotationStr[0] = 0; + quotationStr[1] = 0; + for (int32_t i = 0; i < length; i++) { + buf[bufIndex] = val[i]; + bufIndex++; + if (val[i] == '\"') { + buf[bufIndex] = val[i]; + bufIndex++; + quotationStr[0] = '\"'; + } + if (val[i] == ',') { + quotationStr[0] = '\"'; + } + } + buf[bufIndex] = 0; + if (length == 0) { + quotationStr[0] = '\"'; + } + + taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); + } break; case TSDB_DATA_TYPE_TIMESTAMP: shellFormatTimestamp(buf, *(int64_t *)val, precision); - taosFprintfFile(pFile, "'%s'", buf); + taosFprintfFile(pFile, "%s", buf); break; default: break; @@ -948,6 +968,10 @@ void shellCleanup(void *arg) { taosResetTerminalMode(); } void *shellCancelHandler(void *arg) { setThreadName("shellCancelHandler"); while (1) { + if (shell.exit == true) { + break; + } + if (tsem_wait(&shell.cancelSem) != 0) { taosMsleep(10); continue; @@ -961,7 +985,7 @@ void *shellCancelHandler(void *arg) { taos_kill_query(shell.conn); #ifdef WEBSOCKET } -#endif +#endif #ifdef WINDOWS printf("\n%s", shell.info.promptHeader); #endif @@ -1009,7 +1033,7 @@ int32_t shellExecute() { if (shell.args.restful || shell.args.cloud) { if (shell_conn_ws_server(1)) { return -1; - } + } } else { #endif if (shell.args.auth == NULL) { @@ -1043,7 +1067,7 @@ int32_t shellExecute() { if (shell.args.restful || shell.args.cloud) { ws_close(shell.ws_conn); } else { -#endif +#endif taos_close(shell.conn); #ifdef WEBSOCKET } @@ -1079,7 +1103,12 @@ int32_t shellExecute() { taosThreadCreate(&shell.pid, NULL, shellThreadLoop, NULL); taosThreadJoin(shell.pid, NULL); taosThreadClear(&shell.pid); + if (shell.exit) { + tsem_post(&shell.cancelSem); + break; + } } + taosThreadJoin(spid, NULL); shellCleanupHistory(); return 0; diff --git a/tools/shell/src/shellMain.c b/tools/shell/src/shellMain.c index 703533f8a9d0772ccb466e48dbdf3319b020f8d3..964082f3c3576967457b95ee1b3ab1730bf71542 100644 --- a/tools/shell/src/shellMain.c +++ b/tools/shell/src/shellMain.c @@ -19,6 +19,7 @@ SShellObj shell = {0}; int main(int argc, char *argv[]) { + shell.exit = false; #ifdef WEBSOCKET shell.args.timeout = 10; shell.args.cloud = true; @@ -46,7 +47,7 @@ int main(int argc, char *argv[]) { shellPrintHelp(); return 0; } -#ifdef WEBSOCKET +#ifdef WEBSOCKET shellCheckConnectMode(); #endif taos_init(); diff --git a/tools/shell/src/shellUtil.c b/tools/shell/src/shellUtil.c index e5e61e0b24d4a93031b8a502882d65badd56e192..0430428c381bbcee3f4abf51a44e61bc849f094d 100644 --- a/tools/shell/src/shellUtil.c +++ b/tools/shell/src/shellUtil.c @@ -157,6 +157,6 @@ void shellExit() { taos_close(shell.conn); shell.conn = NULL; } + shell.exit = true; taos_cleanup(); - exit(EXIT_FAILURE); } diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c index 2dcab04b3f4bfd072d766ff1e25c015cf609466f..b8b8392b961a92263f791ee4b480e61a8c148efd 100644 --- a/tools/shell/src/shellWebsocket.c +++ b/tools/shell/src/shellWebsocket.c @@ -18,19 +18,19 @@ #include "shellInt.h" int shell_conn_ws_server(bool first) { - shell.ws_conn = ws_connect_with_dsn(shell.args.dsn); - if (!shell.ws_conn) { - fprintf(stderr, "failed to connect %s, reason: %s\n", - shell.args.dsn, ws_errstr(NULL)); - return -1; - } - if (first && shell.args.restful) { - fprintf(stdout, "successfully connect to %s\n\n", - shell.args.dsn); - } else if (first && shell.args.cloud) { - fprintf(stdout, "successfully connect to cloud service\n"); - } - return 0; + shell.ws_conn = ws_connect_with_dsn(shell.args.dsn); + if (!shell.ws_conn) { + fprintf(stderr, "failed to connect %s, reason: %s\n", + shell.args.dsn, ws_errstr(NULL)); + return -1; + } + if (first && shell.args.restful) { + fprintf(stdout, "successfully connect to %s\n\n", + shell.args.dsn); + } else if (first && shell.args.cloud) { + fprintf(stdout, "successfully connect to cloud service\n"); + } + return 0; } static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) { @@ -39,7 +39,7 @@ static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) { ws_fetch_block(wres, &data, &rows); *execute_time += (double)(ws_take_timing(wres)/1E6); if (!rows) { - return 0; + return 0; } int num_fields = ws_field_count(wres); TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres); @@ -64,7 +64,7 @@ static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) { putchar(' '); putchar('|'); } - putchar('\r'); + putchar('\r'); putchar('\n'); } numOfRows += rows; @@ -79,7 +79,7 @@ static int verticalPrintWebsocket(WS_RES* wres, double* pexecute_time) { ws_fetch_block(wres, &data, &rows); *pexecute_time += (double)(ws_take_timing(wres)/1E6); if (!rows) { - return 0; + return 0; } int num_fields = ws_field_count(wres); TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres); @@ -98,7 +98,7 @@ static int verticalPrintWebsocket(WS_RES* wres, double* pexecute_time) { uint32_t len; for (int i = 0; i < rows; i++) { printf("*************************** %d.row ***************************\n", - numOfRows + 1); + numOfRows + 1); for (int j = 0; j < num_fields; j++) { TAOS_FIELD* field = fields + j; int padding = (int)(maxColNameLen - strlen(field->name)); @@ -121,7 +121,7 @@ static int dumpWebsocketToFile(const char* fname, WS_RES* wres, double* pexecute } TdFilePtr pFile = taosOpenFile(fullname, - TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM); + TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM); if (pFile == NULL) { fprintf(stderr, "failed to open file: %s\r\n", fullname); return -1; @@ -132,7 +132,7 @@ static int dumpWebsocketToFile(const char* fname, WS_RES* wres, double* pexecute *pexecute_time += (double)(ws_take_timing(wres)/1E6); if (!rows) { taosCloseFile(&pFile); - return 0; + return 0; } int numOfRows = 0; TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres); @@ -207,7 +207,7 @@ void shellRunSingleCommandWebsocketImp(char *command) { } if (!shell.ws_conn && shell_conn_ws_server(0)) { - return; + return; } shell.stop_query = false; @@ -216,16 +216,16 @@ void shellRunSingleCommandWebsocketImp(char *command) { WS_RES* res = ws_query_timeout(shell.ws_conn, command, shell.args.timeout); int code = ws_errno(res); if (code != 0) { - et = taosGetTimestampUs(); - fprintf(stderr, "\nDB: error: %s (%.6fs)\n", ws_errstr(res), (et - st)/1E6); - if (code == TSDB_CODE_WS_SEND_TIMEOUT || code == TSDB_CODE_WS_RECV_TIMEOUT) { - fprintf(stderr, "Hint: use -t to increase the timeout in seconds\n"); - } else if (code == TSDB_CODE_WS_INTERNAL_ERRO || code == TSDB_CODE_WS_CLOSED) { - fprintf(stderr, "TDengine server is down, will try to reconnect\n"); - shell.ws_conn = NULL; - } - ws_free_result(res); - return; + et = taosGetTimestampUs(); + fprintf(stderr, "\nDB: error: %s (%.6fs)\n", ws_errstr(res), (et - st)/1E6); + if (code == TSDB_CODE_WS_SEND_TIMEOUT || code == TSDB_CODE_WS_RECV_TIMEOUT) { + fprintf(stderr, "Hint: use -t to increase the timeout in seconds\n"); + } else if (code == TSDB_CODE_WS_INTERNAL_ERRO || code == TSDB_CODE_WS_CLOSED) { + fprintf(stderr, "TDengine server is down, will try to reconnect\n"); + shell.ws_conn = NULL; + } + ws_free_result(res); + return; } double execute_time = ws_take_timing(res)/1E6; @@ -233,36 +233,36 @@ void shellRunSingleCommandWebsocketImp(char *command) { if (shellRegexMatch(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) { fprintf(stdout, "Database changed.\r\n\r\n"); fflush(stdout); - ws_free_result(res); + ws_free_result(res); return; } int numOfRows = 0; if (ws_is_update_query(res)) { - numOfRows = ws_affected_rows(res); - et = taosGetTimestampUs(); + numOfRows = ws_affected_rows(res); + et = taosGetTimestampUs(); double total_time = (et - st)/1E3; double net_time = total_time - (double)execute_time; - printf("Query Ok, %d of %d row(s) in database\n", numOfRows, numOfRows); + printf("Query Ok, %d of %d row(s) in database\n", numOfRows, numOfRows); printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); } else { - int error_no = 0; - numOfRows = shellDumpWebsocket(res, fname, &error_no, printMode, &execute_time); - if (numOfRows < 0) { - ws_free_result(res); - return; - } - et = taosGetTimestampUs(); + int error_no = 0; + numOfRows = shellDumpWebsocket(res, fname, &error_no, printMode, &execute_time); + if (numOfRows < 0) { + ws_free_result(res); + return; + } + et = taosGetTimestampUs(); double total_time = (et - st) / 1E3; double net_time = total_time - execute_time; - if (error_no == 0 && !shell.stop_query) { - printf("Query OK, %d row(s) in set\n", numOfRows); - printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); - } else { - printf("Query interrupted, %d row(s) in set (%.6fs)\n", numOfRows, - (et - st)/1E6); - printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); - } + if (error_no == 0 && !shell.stop_query) { + printf("Query OK, %d row(s) in set\n", numOfRows); + printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); + } else { + printf("Query interrupted, %d row(s) in set (%.6fs)\n", numOfRows, + (et - st)/1E6); + printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time); + } } printf("\n"); ws_free_result(res);